title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
ENH: Add return_inverse to cython-unique; unify unique/factorize-code | diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in
index a71023ed34f44..7f4c2a6410870 100644
--- a/pandas/_libs/hashtable_class_helper.pxi.in
+++ b/pandas/_libs/hashtable_class_helper.pxi.in
@@ -356,11 +356,12 @@ cdef class {{name}}HashTable(HashTable):
@cython.boundscheck(False)
@cython.wraparound(False)
- def _factorize(self, const {{dtype}}_t[:] values, {{name}}Vector uniques,
- Py_ssize_t count_prior=0, Py_ssize_t na_sentinel=-1,
- object na_value=None):
+ def _unique(self, const {{dtype}}_t[:] values, {{name}}Vector uniques,
+ Py_ssize_t count_prior=0, Py_ssize_t na_sentinel=-1,
+ object na_value=None, bint ignore_na=False,
+ bint return_inverse=False):
"""
- Calculate unique values and labels (no sorting); ignores all NA-values
+ Calculate unique values and labels (no sorting!)
Parameters
----------
@@ -374,13 +375,22 @@ cdef class {{name}}HashTable(HashTable):
Sentinel value used for all NA-values in inverse
na_value : object, default None
Value to identify as missing. If na_value is None, then
- any value satisfying val!=val are considered missing.
+ any value "val" satisfying val != val is considered missing.
+ If na_value is not None, then _additionally_, any value "val"
+ satisfying val == na_value is considered missing.
+ ignore_na : boolean, default False
+ Whether NA-values should be ignored for calculating the uniques. If
+ True, the labels corresponding to missing values will be set to
+ na_sentinel.
+ return_inverse : boolean, default False
+ Whether the mapping of the original array values to their location
+ in the vector of uniques should be returned.
Returns
-------
uniques : ndarray[{{dtype}}]
Unique values of input, not sorted
- labels : ndarray[int64]
+ labels : ndarray[int64] (if return_inverse=True)
The labels from values to uniques
"""
cdef:
@@ -392,7 +402,8 @@ cdef class {{name}}HashTable(HashTable):
{{name}}VectorData *ud
bint use_na_value
- labels = np.empty(n, dtype=np.int64)
+ if return_inverse:
+ labels = np.empty(n, dtype=np.int64)
ud = uniques.data
use_na_value = na_value is not None
@@ -410,20 +421,19 @@ cdef class {{name}}HashTable(HashTable):
for i in range(n):
val = values[i]
- if val != val or (use_na_value and val == na_value2):
+ if ignore_na and (val != val
+ or (use_na_value and val == na_value2)):
+ # if missing values do not count as unique values (i.e. if
+ # ignore_na is True), skip the hashtable entry for them,
+ # and replace the corresponding label with na_sentinel
labels[i] = na_sentinel
continue
k = kh_get_{{dtype}}(self.table, val)
- if k != self.table.n_buckets:
- # k falls into a previous bucket
- idx = self.table.vals[k]
- labels[i] = idx
- else:
+ if k == self.table.n_buckets:
# k hasn't been seen yet
k = kh_put_{{dtype}}(self.table, val, &ret)
- self.table.vals[k] = count
if needs_resize(ud):
with gil:
@@ -433,23 +443,82 @@ cdef class {{name}}HashTable(HashTable):
"Vector.resize() needed")
uniques.resize()
append_data_{{dtype}}(ud, val)
- labels[i] = count
- count += 1
+ if return_inverse:
+ self.table.vals[k] = count
+ labels[i] = count
+ count += 1
+ elif return_inverse:
+ # k falls into a previous bucket
+ # only relevant in case we need to construct the inverse
+ idx = self.table.vals[k]
+ labels[i] = idx
+
+ if return_inverse:
+ return uniques.to_array(), np.asarray(labels)
+ return uniques.to_array()
- return np.asarray(labels)
+ def unique(self, const {{dtype}}_t[:] values, bint return_inverse=False):
+ """
+ Calculate unique values and labels (no sorting!)
+
+ Parameters
+ ----------
+ values : ndarray[{{dtype}}]
+ Array of values of which unique will be calculated
+ return_inverse : boolean, default False
+ Whether the mapping of the original array values to their location
+ in the vector of uniques should be returned.
+
+ Returns
+ -------
+ uniques : ndarray[{{dtype}}]
+ Unique values of input, not sorted
+ labels : ndarray[int64] (if return_inverse)
+ The labels from values to uniques
+ """
+ uniques = {{name}}Vector()
+ return self._unique(values, uniques, ignore_na=False,
+ return_inverse=return_inverse)
def factorize(self, const {{dtype}}_t[:] values, Py_ssize_t na_sentinel=-1,
object na_value=None):
- uniques = {{name}}Vector()
- labels = self._factorize(values, uniques=uniques,
- na_sentinel=na_sentinel, na_value=na_value)
- return labels, uniques.to_array()
+ """
+ Calculate unique values and labels (no sorting!)
+
+ Missing values are not included in the "uniques" for this method.
+ The labels for any missing values will be set to "na_sentinel"
+
+ Parameters
+ ----------
+ values : ndarray[{{dtype}}]
+ Array of values of which unique will be calculated
+ na_sentinel : Py_ssize_t, default -1
+ Sentinel value used for all NA-values in inverse
+ na_value : object, default None
+ Value to identify as missing. If na_value is None, then
+ any value "val" satisfying val != val is considered missing.
+ If na_value is not None, then _additionally_, any value "val"
+ satisfying val == na_value is considered missing.
+
+ Returns
+ -------
+ uniques : ndarray[{{dtype}}]
+ Unique values of input, not sorted
+ labels : ndarray[int64]
+ The labels from values to uniques
+ """
+ uniques_vector = {{name}}Vector()
+ return self._unique(values, uniques_vector, na_sentinel=na_sentinel,
+ na_value=na_value, ignore_na=True,
+ return_inverse=True)
def get_labels(self, const {{dtype}}_t[:] values, {{name}}Vector uniques,
Py_ssize_t count_prior=0, Py_ssize_t na_sentinel=-1,
object na_value=None):
- return self._factorize(values, uniques, count_prior=count_prior,
- na_sentinel=na_sentinel, na_value=na_value)
+ _, labels = self._unique(values, uniques, count_prior=count_prior,
+ na_sentinel=na_sentinel, na_value=na_value,
+ ignore_na=True, return_inverse=True)
+ return labels
@cython.boundscheck(False)
def get_labels_groupby(self, const {{dtype}}_t[:] values):
@@ -496,44 +565,6 @@ cdef class {{name}}HashTable(HashTable):
return np.asarray(labels), arr_uniques
- @cython.boundscheck(False)
- @cython.wraparound(False)
- def unique(self, const {{dtype}}_t[:] values):
- """
- Calculate unique values without sorting
-
- Parameters
- ----------
- values : ndarray[{{dtype}}]
- Array of values of which unique will be calculated
-
- Returns
- -------
- uniques : ndarray[{{dtype}}]
- Unique values of input, not sorted
- """
- cdef:
- Py_ssize_t i, n = len(values)
- int ret = 0
- {{dtype}}_t val
- khiter_t k
- {{name}}Vector uniques = {{name}}Vector()
- {{name}}VectorData *ud
-
- ud = uniques.data
-
- with nogil:
- for i in range(n):
- val = values[i]
- k = kh_get_{{dtype}}(self.table, val)
- if k == self.table.n_buckets:
- kh_put_{{dtype}}(self.table, val, &ret)
- if needs_resize(ud):
- with gil:
- uniques.resize()
- append_data_{{dtype}}(ud, val)
- return uniques.to_array()
-
{{endfor}}
@@ -613,56 +644,6 @@ cdef class StringHashTable(HashTable):
free(vecs)
return labels
- @cython.boundscheck(False)
- @cython.wraparound(False)
- def unique(self, ndarray[object] values):
- """
- Calculate unique values without sorting
-
- Parameters
- ----------
- values : ndarray[object]
- Array of values of which unique will be calculated
-
- Returns
- -------
- uniques : ndarray[object]
- Unique values of input, not sorted
- """
- cdef:
- Py_ssize_t i, count, n = len(values)
- int64_t[:] uindexer
- int ret = 0
- object val
- ObjectVector uniques
- khiter_t k
- const char *v
- const char **vecs
-
- vecs = <const char **>malloc(n * sizeof(char *))
- uindexer = np.empty(n, dtype=np.int64)
- for i in range(n):
- val = values[i]
- v = util.get_c_string(val)
- vecs[i] = v
-
- count = 0
- with nogil:
- for i in range(n):
- v = vecs[i]
- k = kh_get_str(self.table, v)
- if k == self.table.n_buckets:
- kh_put_str(self.table, v, &ret)
- uindexer[count] = i
- count += 1
- free(vecs)
-
- # uniques
- uniques = ObjectVector()
- for i in range(count):
- uniques.append(values[uindexer[i]])
- return uniques.to_array()
-
@cython.boundscheck(False)
def lookup(self, ndarray[object] values):
cdef:
@@ -726,11 +707,12 @@ cdef class StringHashTable(HashTable):
@cython.boundscheck(False)
@cython.wraparound(False)
- def _factorize(self, ndarray[object] values, ObjectVector uniques,
- Py_ssize_t count_prior=0, Py_ssize_t na_sentinel=-1,
- object na_value=None):
+ def _unique(self, ndarray[object] values, ObjectVector uniques,
+ Py_ssize_t count_prior=0, Py_ssize_t na_sentinel=-1,
+ object na_value=None, bint ignore_na=False,
+ bint return_inverse=False):
"""
- Calculate unique values and labels (no sorting); ignores all NA-values
+ Calculate unique values and labels (no sorting!)
Parameters
----------
@@ -743,13 +725,23 @@ cdef class StringHashTable(HashTable):
na_sentinel : Py_ssize_t, default -1
Sentinel value used for all NA-values in inverse
na_value : object, default None
- Value to identify as missing
+ Value to identify as missing. If na_value is None, then any value
+ that is not a string is considered missing. If na_value is
+ not None, then _additionally_ any value "val" satisfying
+ val == na_value is considered missing.
+ ignore_na : boolean, default False
+ Whether NA-values should be ignored for calculating the uniques. If
+ True, the labels corresponding to missing values will be set to
+ na_sentinel.
+ return_inverse : boolean, default False
+ Whether the mapping of the original array values to their location
+ in the vector of uniques should be returned.
Returns
-------
uniques : ndarray[object]
Unique values of input, not sorted
- labels : ndarray[int64]
+ labels : ndarray[int64] (if return_inverse=True)
The labels from values to uniques
"""
cdef:
@@ -763,41 +755,50 @@ cdef class StringHashTable(HashTable):
khiter_t k
bint use_na_value
- labels = np.zeros(n, dtype=np.int64)
+ if return_inverse:
+ labels = np.zeros(n, dtype=np.int64)
uindexer = np.empty(n, dtype=np.int64)
use_na_value = na_value is not None
- # assign pointers and pre-filter out missing
+ # assign pointers and pre-filter out missing (if ignore_na)
vecs = <const char **>malloc(n * sizeof(char *))
for i in range(n):
val = values[i]
- if (isinstance(val, (str, unicode))
- and not (use_na_value and val == na_value)):
+ if (ignore_na
+ and (not isinstance(val, (str, unicode))
+ or (use_na_value and val == na_value))):
+ # if missing values do not count as unique values (i.e. if
+ # ignore_na is True), we can skip the actual value, and
+ # replace the label with na_sentinel directly
+ labels[i] = na_sentinel
+ else:
+ # if ignore_na is False, we also stringify NaN/None/etc.
v = util.get_c_string(val)
vecs[i] = v
- else:
- labels[i] = na_sentinel
# compute
with nogil:
for i in range(n):
- if labels[i] == na_sentinel:
+ if ignore_na and labels[i] == na_sentinel:
+ # skip entries for ignored missing values (see above)
continue
v = vecs[i]
k = kh_get_str(self.table, v)
- if k != self.table.n_buckets:
- # k falls into a previous bucket
- idx = self.table.vals[k]
- labels[i] = <int64_t>idx
- else:
+ if k == self.table.n_buckets:
# k hasn't been seen yet
k = kh_put_str(self.table, v, &ret)
- self.table.vals[k] = count
uindexer[count] = i
- labels[i] = <int64_t>count
+ if return_inverse:
+ self.table.vals[k] = count
+ labels[i] = <int64_t>count
count += 1
+ elif return_inverse:
+ # k falls into a previous bucket
+ # only relevant in case we need to construct the inverse
+ idx = self.table.vals[k]
+ labels[i] = <int64_t>idx
free(vecs)
@@ -805,20 +806,72 @@ cdef class StringHashTable(HashTable):
for i in range(count):
uniques.append(values[uindexer[i]])
- return np.asarray(labels)
+ if return_inverse:
+ return uniques.to_array(), np.asarray(labels)
+ return uniques.to_array()
+
+ def unique(self, ndarray[object] values, bint return_inverse=False):
+ """
+ Calculate unique values and labels (no sorting!)
+
+ Parameters
+ ----------
+ values : ndarray[object]
+ Array of values of which unique will be calculated
+ return_inverse : boolean, default False
+ Whether the mapping of the original array values to their location
+ in the vector of uniques should be returned.
+
+ Returns
+ -------
+ uniques : ndarray[object]
+ Unique values of input, not sorted
+ labels : ndarray[int64] (if return_inverse)
+ The labels from values to uniques
+ """
+ uniques = ObjectVector()
+ return self._unique(values, uniques, ignore_na=False,
+ return_inverse=return_inverse)
def factorize(self, ndarray[object] values, Py_ssize_t na_sentinel=-1,
object na_value=None):
- uniques = ObjectVector()
- labels = self._factorize(values, uniques=uniques,
- na_sentinel=na_sentinel, na_value=na_value)
- return labels, uniques.to_array()
+ """
+ Calculate unique values and labels (no sorting!)
+
+ Missing values are not included in the "uniques" for this method.
+ The labels for any missing values will be set to "na_sentinel"
+
+ Parameters
+ ----------
+ values : ndarray[object]
+ Array of values of which unique will be calculated
+ na_sentinel : Py_ssize_t, default -1
+ Sentinel value used for all NA-values in inverse
+ na_value : object, default None
+ Value to identify as missing. If na_value is None, then any value
+ that is not a string is considered missing. If na_value is
+ not None, then _additionally_ any value "val" satisfying
+ val == na_value is considered missing.
+
+ Returns
+ -------
+ uniques : ndarray[object]
+ Unique values of input, not sorted
+ labels : ndarray[int64]
+ The labels from values to uniques
+ """
+ uniques_vector = ObjectVector()
+ return self._unique(values, uniques_vector, na_sentinel=na_sentinel,
+ na_value=na_value, ignore_na=True,
+ return_inverse=True)
def get_labels(self, ndarray[object] values, ObjectVector uniques,
Py_ssize_t count_prior=0, Py_ssize_t na_sentinel=-1,
object na_value=None):
- return self._factorize(values, uniques, count_prior=count_prior,
- na_sentinel=na_sentinel, na_value=na_value)
+ _, labels = self._unique(values, uniques, count_prior=count_prior,
+ na_sentinel=na_sentinel, na_value=na_value,
+ ignore_na=True, return_inverse=True)
+ return labels
cdef class PyObjectHashTable(HashTable):
@@ -908,44 +961,12 @@ cdef class PyObjectHashTable(HashTable):
@cython.boundscheck(False)
@cython.wraparound(False)
- def unique(self, ndarray[object] values):
+ def _unique(self, ndarray[object] values, ObjectVector uniques,
+ Py_ssize_t count_prior=0, Py_ssize_t na_sentinel=-1,
+ object na_value=None, bint ignore_na=False,
+ bint return_inverse=False):
"""
- Calculate unique values without sorting
-
- Parameters
- ----------
- values : ndarray[object]
- Array of values of which unique will be calculated
-
- Returns
- -------
- uniques : ndarray[object]
- Unique values of input, not sorted
- """
- cdef:
- Py_ssize_t i, n = len(values)
- int ret = 0
- object val
- khiter_t k
- ObjectVector uniques = ObjectVector()
-
- for i in range(n):
- val = values[i]
- hash(val)
- k = kh_get_pymap(self.table, <PyObject*>val)
- if k == self.table.n_buckets:
- kh_put_pymap(self.table, <PyObject*>val, &ret)
- uniques.append(val)
-
- return uniques.to_array()
-
- @cython.boundscheck(False)
- @cython.wraparound(False)
- def _factorize(self, ndarray[object] values, ObjectVector uniques,
- Py_ssize_t count_prior=0, Py_ssize_t na_sentinel=-1,
- object na_value=None):
- """
- Calculate unique values and labels (no sorting); ignores all NA-values
+ Calculate unique values and labels (no sorting!)
Parameters
----------
@@ -959,13 +980,22 @@ cdef class PyObjectHashTable(HashTable):
Sentinel value used for all NA-values in inverse
na_value : object, default None
Value to identify as missing. If na_value is None, then None _plus_
- any value satisfying val!=val are considered missing.
+ any value "val" satisfying val != val is considered missing.
+ If na_value is not None, then _additionally_, any value "val"
+ satisfying val == na_value is considered missing.
+ ignore_na : boolean, default False
+ Whether NA-values should be ignored for calculating the uniques. If
+ True, the labels corresponding to missing values will be set to
+ na_sentinel.
+ return_inverse : boolean, default False
+ Whether the mapping of the original array values to their location
+ in the vector of uniques should be returned.
Returns
-------
uniques : ndarray[object]
Unique values of input, not sorted
- labels : ndarray[int64]
+ labels : ndarray[int64] (if return_inverse=True)
The labels from values to uniques
"""
cdef:
@@ -976,42 +1006,100 @@ cdef class PyObjectHashTable(HashTable):
khiter_t k
bint use_na_value
- labels = np.empty(n, dtype=np.int64)
+ if return_inverse:
+ labels = np.empty(n, dtype=np.int64)
use_na_value = na_value is not None
for i in range(n):
val = values[i]
hash(val)
- if ((val != val or val is None)
- or (use_na_value and val == na_value)):
+ if ignore_na and ((val != val or val is None)
+ or (use_na_value and val == na_value)):
+ # if missing values do not count as unique values (i.e. if
+ # ignore_na is True), skip the hashtable entry for them, and
+ # replace the corresponding label with na_sentinel
labels[i] = na_sentinel
continue
k = kh_get_pymap(self.table, <PyObject*>val)
- if k != self.table.n_buckets:
- # k falls into a previous bucket
- idx = self.table.vals[k]
- labels[i] = idx
- else:
+ if k == self.table.n_buckets:
# k hasn't been seen yet
k = kh_put_pymap(self.table, <PyObject*>val, &ret)
- self.table.vals[k] = count
uniques.append(val)
- labels[i] = count
- count += 1
+ if return_inverse:
+ self.table.vals[k] = count
+ labels[i] = count
+ count += 1
+ elif return_inverse:
+ # k falls into a previous bucket
+ # only relevant in case we need to construct the inverse
+ idx = self.table.vals[k]
+ labels[i] = idx
+
+ if return_inverse:
+ return uniques.to_array(), np.asarray(labels)
+ return uniques.to_array()
+
+ def unique(self, ndarray[object] values, bint return_inverse=False):
+ """
+ Calculate unique values and labels (no sorting!)
- return np.asarray(labels)
+ Parameters
+ ----------
+ values : ndarray[object]
+ Array of values of which unique will be calculated
+ return_inverse : boolean, default False
+ Whether the mapping of the original array values to their location
+ in the vector of uniques should be returned.
+
+ Returns
+ -------
+ uniques : ndarray[object]
+ Unique values of input, not sorted
+ labels : ndarray[int64] (if return_inverse)
+ The labels from values to uniques
+ """
+ uniques = ObjectVector()
+ return self._unique(values, uniques, ignore_na=False,
+ return_inverse=return_inverse)
def factorize(self, ndarray[object] values, Py_ssize_t na_sentinel=-1,
object na_value=None):
- uniques = ObjectVector()
- labels = self._factorize(values, uniques=uniques,
- na_sentinel=na_sentinel, na_value=na_value)
- return labels, uniques.to_array()
+ """
+ Calculate unique values and labels (no sorting!)
+
+ Missing values are not included in the "uniques" for this method.
+ The labels for any missing values will be set to "na_sentinel"
+
+ Parameters
+ ----------
+ values : ndarray[object]
+ Array of values of which unique will be calculated
+ na_sentinel : Py_ssize_t, default -1
+ Sentinel value used for all NA-values in inverse
+ na_value : object, default None
+ Value to identify as missing. If na_value is None, then None _plus_
+ any value "val" satisfying val != val is considered missing.
+ If na_value is not None, then _additionally_, any value "val"
+ satisfying val == na_value is considered missing.
+
+ Returns
+ -------
+ uniques : ndarray[object]
+ Unique values of input, not sorted
+ labels : ndarray[int64]
+ The labels from values to uniques
+ """
+ uniques_vector = ObjectVector()
+ return self._unique(values, uniques_vector, na_sentinel=na_sentinel,
+ na_value=na_value, ignore_na=True,
+ return_inverse=True)
def get_labels(self, ndarray[object] values, ObjectVector uniques,
Py_ssize_t count_prior=0, Py_ssize_t na_sentinel=-1,
object na_value=None):
- return self._factorize(values, uniques, count_prior=count_prior,
- na_sentinel=na_sentinel, na_value=na_value)
+ _, labels = self._unique(values, uniques, count_prior=count_prior,
+ na_sentinel=na_sentinel, na_value=na_value,
+ ignore_na=True, return_inverse=True)
+ return labels
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 5f7995ac649a2..98cb45a4d4efc 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -460,7 +460,7 @@ def _factorize_array(values, na_sentinel=-1, size_hint=None,
(hash_klass, _), values = _get_data_algo(values, _hashtables)
table = hash_klass(size_hint or len(values))
- labels, uniques = table.factorize(values, na_sentinel=na_sentinel,
+ uniques, labels = table.factorize(values, na_sentinel=na_sentinel,
na_value=na_value)
labels = ensure_platform_int(labels)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index fa33a1ceae0b9..c9d403f6696af 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -1361,6 +1361,14 @@ def test_hashtable_unique(self, htable, tm_dtype, writable):
result_unique = htable().unique(s_duplicated.values)
tm.assert_numpy_array_equal(result_unique, expected_unique)
+ # test return_inverse=True
+ # reconstruction can only succeed if the inverse is correct
+ result_unique, result_inverse = htable().unique(s_duplicated.values,
+ return_inverse=True)
+ tm.assert_numpy_array_equal(result_unique, expected_unique)
+ reconstr = result_unique[result_inverse]
+ tm.assert_numpy_array_equal(reconstr, s_duplicated.values)
+
@pytest.mark.parametrize('htable, tm_dtype', [
(ht.PyObjectHashTable, 'String'),
(ht.StringHashTable, 'String'),
@@ -1383,7 +1391,7 @@ def test_hashtable_factorize(self, htable, tm_dtype, writable):
s_duplicated.values.setflags(write=writable)
na_mask = s_duplicated.isna().values
- result_inverse, result_unique = htable().factorize(s_duplicated.values)
+ result_unique, result_inverse = htable().factorize(s_duplicated.values)
# drop_duplicates has own cython code (hash_table_func_helper.pxi)
# and is tested separately; keeps first occurrence like ht.factorize()
| This is a continuation/split-off from #22986, where I tried to deduplicate the cython code for `unique`/`factorize`, and add a `return_inverse` kwarg to `unique` at the same time. This didn't fully work because there was a performance impact that was deemed unacceptable. I've opened cython/cython#2660 to figure out why (resp. have a directive to allow force-compilation of different parameter values, which would solve this more elegantly), and @robertwb told me that it's likely the use of kwargs, but also suggested the possibility to use explicit templating on `return_inverse`.
First I tried unification without any kwargs in https://github.com/pandas-dev/pandas/pull/23400/commits/858f54e5e4d5e84f2a45f7c5954cbde9fb6d9438 - this still had the same perf impact as in #22986.
Then I added the explict templating in https://github.com/pandas-dev/pandas/pull/23400/commits/4ed354a9544d0576aa1b98ffd26d3667677ed6ea, which successfully avoids the perf impact, but is a bit uglier in the `pxi.in`. Finally, I've readded the kwargs in https://github.com/pandas-dev/pandas/pull/23400/commits/906cd50e8391148a19fb93487b7fedb25ab9e767 to keep the diff here smaller, and this doesn't have an impact.
I've had many unsuccessful tries to run the ASVs for all those combinations over the weekend, so I'm going back to explicitly testing the unique-code here (code at the end). The overview is as follows (for completeness I've added the #22986 and the commit immediately prior on master; all results in milliseconds):
```
>>> df
e5196aaac6 99e640186e caea25a8b9 858f54e5e4 4ed354a954 906cd50e83 0d6dad0ca1
#22986~1 #22986 master no kwargs templated tmpl+kwargs PR+master
StringIndex 16.220276 16.023982 15.967190 15.706223 15.624902 14.952832 15.480224
CategoricalIndex 1.274368 1.289882 1.231592 1.400420 1.247882 1.250814 1.327520
IntIndex 2.743178 2.778960 2.722282 3.115781 2.818848 2.802402 2.877664
UIntIndex 2.760333 2.773203 2.714546 3.037279 2.786909 2.800675 2.914427
RangeIndex 2.765113 2.773916 2.804379 3.071501 2.801661 2.803809 2.816012
FloatIndex 4.734558 4.576122 4.575152 5.043915 4.518481 4.548134 4.732967
TimedeltaIndex 4.361350 4.238442 4.187650 4.601932 4.369402 4.409724 4.422439
StringSeries 59.779114 58.508761 57.742805 58.343838 58.266719 57.043169 58.619018
CategoricalSeries 2.708816 2.666240 2.627852 2.895955 2.664937 2.658149 2.715396
IntSeries 5.078922 4.991856 4.928836 5.480455 5.108894 5.096692 5.434841
UIntSeries 5.127376 4.984227 5.027707 5.445736 5.155703 5.080125 5.211029
RangeSeries 5.218271 5.013379 4.973759 5.531086 5.193241 5.091924 5.197651
FloatSeries 7.126269 6.959626 6.988052 7.738704 6.977395 6.978753 7.215513
TimedeltaSeries 5.146969 4.985955 5.024334 5.551364 5.153091 5.108740 5.158593
```
or, relatively to the commit on master I was basing myself on:
```
>>> df.divide(df.iloc[:, 2], axis=0)
e5196aaac6 99e640186e caea25a8b9 858f54e5e4 4ed354a954 906cd50e83 0d6dad0ca1
#22986~1 #22986 master no kwargs templated tmpl+kwargs PR+master
StringIndex 1.015850 1.003557 1.0 0.983656 0.978563 0.936472 0.969502
CategoricalIndex 1.034733 1.047329 1.0 1.137081 1.013227 1.015607 1.077889
IntIndex 1.007676 1.020820 1.0 1.144547 1.035472 1.029431 1.057078
UIntIndex 1.016868 1.021608 1.0 1.118890 1.026658 1.031729 1.073633
RangeIndex 0.985999 0.989138 1.0 1.095252 0.999031 0.999797 1.004148
FloatIndex 1.034842 1.000212 1.0 1.102458 0.987613 0.994095 1.034494
TimedeltaIndex 1.041479 1.012129 1.0 1.098930 1.043402 1.053031 1.056067
StringSeries 1.035265 1.013265 1.0 1.010409 1.009073 0.987884 1.015174
CategoricalSeries 1.030810 1.014608 1.0 1.102024 1.014112 1.011529 1.033314
IntSeries 1.030450 1.012786 1.0 1.111917 1.036531 1.034056 1.102662
UIntSeries 1.019824 0.991352 1.0 1.083145 1.025458 1.010426 1.036462
RangeSeries 1.049160 1.007966 1.0 1.112053 1.044128 1.023758 1.045015
FloatSeries 1.019779 0.995932 1.0 1.107419 0.998475 0.998669 1.032550
TimedeltaSeries 1.024408 0.992361 1.0 1.104895 1.025627 1.016799 1.026722
```
So long story short, this PR prepares the hashtable-backend to support `return_inverse=True`, which plays into #21357 #21645 #22824, and will also allow to easily solve #21720.
Code for the above timings:
```
import pandas as pd
import numpy as np
import pandas.util.testing as tm
import timeit
hash = pd.__git_version__[:10]
k = 10 ** 5
rep = 10
number = 100
tic = timeit.default_timer()
tags = ['String', 'Categorical', 'Int', 'UInt',
'Range', 'Float', 'Timedelta']
df = pd.DataFrame(index = [x+'Index' for x in tags], columns = ['mean', 'std'])
np.random.seed(55555)
with tm.RNGContext(55555):
for tag in tags:
idx = getattr(tm, f'make{tag}Index')(k=k)
t = timeit.repeat('idx.unique()', setup='from __main__ import idx', repeat = rep+1, number = number)[1:]
df.loc[tag+'Index', 'mean'] = pd.Series(t).mean() / number
df.loc[tag+'Index', 'std'] = pd.Series(t).std() / number
s = pd.Series(idx).sample(frac=2, replace=True)
t = timeit.repeat('s.unique()', setup='from __main__ import s', repeat = rep+1, number = number)[1:]
df.loc[tag+'Series', 'mean'] = pd.Series(t).mean() / number
df.loc[tag+'Series', 'std'] = pd.Series(t).std() / number
df.to_csv(f'test_{hash}.csv')
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/23400 | 2018-10-28T17:55:35Z | 2018-11-29T17:21:25Z | 2018-11-29T17:21:25Z | 2018-11-29T20:54:57Z |
ENH: Use flake8 to check for PEP8 violations in doctests | diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py
index aa8a1500d9d3d..a3feee6552178 100644
--- a/scripts/tests/test_validate_docstrings.py
+++ b/scripts/tests/test_validate_docstrings.py
@@ -225,8 +225,9 @@ def good_imports(self):
Examples
--------
This example does not import pandas or import numpy.
- >>> import time
>>> import datetime
+ >>> datetime.MAXYEAR
+ 9999
"""
pass
@@ -596,6 +597,44 @@ def prefix_pandas(self):
pass
+class BadExamples(object):
+
+ def unused_import(self):
+ """
+ Examples
+ --------
+ >>> import pandas as pdf
+ >>> df = pd.DataFrame(np.ones((3, 3)), columns=('a', 'b', 'c'))
+ """
+ pass
+
+ def missing_whitespace_around_arithmetic_operator(self):
+ """
+ Examples
+ --------
+ >>> 2+5
+ 7
+ """
+ pass
+
+ def indentation_is_not_a_multiple_of_four(self):
+ """
+ Examples
+ --------
+ >>> if 2 + 5:
+ ... pass
+ """
+ pass
+
+ def missing_whitespace_after_comma(self):
+ """
+ Examples
+ --------
+ >>> df = pd.DataFrame(np.ones((3,3)),columns=('a','b', 'c'))
+ """
+ pass
+
+
class TestValidator(object):
def _import_path(self, klass=None, func=None):
@@ -634,7 +673,7 @@ def test_good_class(self):
@capture_stderr
@pytest.mark.parametrize("func", [
'plot', 'sample', 'random_letters', 'sample_values', 'head', 'head1',
- 'contains', 'mode'])
+ 'contains', 'mode', 'good_imports'])
def test_good_functions(self, func):
errors = validate_one(self._import_path(
klass='GoodDocStrings', func=func))['errors']
@@ -714,16 +753,25 @@ def test_bad_generic_functions(self, func):
marks=pytest.mark.xfail),
# Examples tests
('BadGenericDocStrings', 'method',
- ('numpy does not need to be imported in the examples,')),
+ ('numpy does not need to be imported in the examples',)),
('BadGenericDocStrings', 'method',
- ('pandas does not need to be imported in the examples,')),
+ ('pandas does not need to be imported in the examples',)),
# See Also tests
('BadSeeAlso', 'prefix_pandas',
('pandas.Series.rename in `See Also` section '
- 'does not need `pandas` prefix',))
+ 'does not need `pandas` prefix',)),
+ # Examples tests
+ ('BadExamples', 'unused_import',
+ ('1 F401 \'pandas as pdf\' imported but unused',)),
+ ('BadExamples', 'indentation_is_not_a_multiple_of_four',
+ ('1 E111 indentation is not a multiple of four',)),
+ ('BadExamples', 'missing_whitespace_around_arithmetic_operator',
+ ('1 E226 missing whitespace around arithmetic operator',)),
+ ('BadExamples', 'missing_whitespace_after_comma',
+ ('3 E231 missing whitespace after \',\'',)),
])
def test_bad_examples(self, capsys, klass, func, msgs):
- result = validate_one(self._import_path(klass=klass, func=func)) # noqa:F821
+ result = validate_one(self._import_path(klass=klass, func=func))
for msg in msgs:
assert msg in ' '.join(result['errors'])
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index 4c54762f6df31..ef6465c3e988d 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -24,6 +24,10 @@
import inspect
import importlib
import doctest
+import tempfile
+
+import flake8.main.application
+
try:
from io import StringIO
except ImportError:
@@ -168,7 +172,7 @@ def _load_obj(name):
@staticmethod
def _to_original_callable(obj):
"""
- Find the Python object that contains the source code ot the object.
+ Find the Python object that contains the source code of the object.
This is useful to find the place in the source code (file and line
number) where a docstring is defined. It does not currently work for
@@ -407,6 +411,26 @@ def examples_source_code(self):
lines = doctest.DocTestParser().get_examples(self.raw_doc)
return [line.source for line in lines]
+ def validate_pep8(self):
+ if not self.examples:
+ return
+
+ content = ''.join(('import numpy as np # noqa: F401\n',
+ 'import pandas as pd # noqa: F401\n',
+ *self.examples_source_code))
+
+ application = flake8.main.application.Application()
+ application.initialize(["--quiet"])
+
+ with tempfile.NamedTemporaryFile(mode='w') as file:
+ file.write(content)
+ file.flush()
+ application.run_checks([file.name])
+
+ application.report()
+
+ yield from application.guide.stats.statistics_for('')
+
def validate_one(func_name):
"""
@@ -495,6 +519,13 @@ def validate_one(func_name):
for param_err in param_errs:
errs.append('\t{}'.format(param_err))
+ pep8_errs = list(doc.validate_pep8())
+ if pep8_errs:
+ errs.append('Linting issues in doctests:')
+ for err in pep8_errs:
+ errs.append('\t{} {} {}'.format(err.count, err.error_code,
+ err.message))
+
if doc.is_function_or_method:
if not doc.returns and "return" in doc.method_source:
errs.append('No Returns section found')
| - [x] tests added
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Improvement to PR #23381 regarding issue #23154
Enables `--doctests` flag, resulting in doctests being checked by flake8
Uses [flake8.api.legacy](http://flake8.pycqa.org/en/latest/user/python-api.html) to invoke the tests.
Downsides are:
* > Flake8 3.0.0 presently does not have a public, stable Python API.
* provided options in `flake8.get_style_guide` don't have an effect. Thus `setup.cfg` had to be enhanced.
* returns the count of each error within the complete file, not just the requested function. [Might be confusing]
`python ./scripts/validate_docstrings.py pandas.read_excel`
```
[...]
Parameter "convert_float" description should finish with "."
Errors in doctest sections
2 F721 syntax error in doctest
2 F821 undefined name '_make_signature'
```
Help in the form of `Use "flake8 pandas/util/_decorators.py" for further information.` could be added to the output:
```
pandas/util/_decorators.py:320:26: F721 syntax error in doctest
pandas/util/_decorators.py:321:13: F721 syntax error in doctest
pandas/util/_decorators.py:322:15: F821 undefined name '_make_signature'
pandas/util/_decorators.py:322:31: F821 undefined name 'f'
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/23399 | 2018-10-28T17:51:57Z | 2018-11-04T18:42:47Z | 2018-11-04T18:42:47Z | 2018-11-04T19:29:50Z |
ENH: to_list as alias for tolist | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 49c89a53e7b17..9586483d44bdf 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -323,7 +323,7 @@ Conversion
Series.bool
Series.to_period
Series.to_timestamp
- Series.tolist
+ Series.to_list
Series.get_values
@@ -1534,7 +1534,7 @@ Conversion
Index.item
Index.map
Index.ravel
- Index.tolist
+ Index.to_list
Index.to_native_types
Index.to_series
Index.to_frame
diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst
index b32603cb78795..8c4928cd8165e 100644
--- a/doc/source/timedeltas.rst
+++ b/doc/source/timedeltas.rst
@@ -436,11 +436,11 @@ Finally, the combination of ``TimedeltaIndex`` with ``DatetimeIndex`` allow cert
.. ipython:: python
tdi = pd.TimedeltaIndex(['1 days', pd.NaT, '2 days'])
- tdi.tolist()
+ tdi.to_list()
dti = pd.date_range('20130101', periods=3)
- dti.tolist()
- (dti + tdi).tolist()
- (dti - tdi).tolist()
+ dti.to_list()
+ (dti + tdi).to_list()
+ (dti - tdi).to_list()
Conversions
~~~~~~~~~~~
@@ -461,7 +461,7 @@ Scalars type ops work as well. These can potentially return a *different* type o
# subtraction of a date and a timedelta -> datelike
# note that trying to subtract a date from a Timedelta will raise an exception
- (pd.Timestamp('20130101') - tdi).tolist()
+ (pd.Timestamp('20130101') - tdi).to_list()
# timedelta + timedelta -> timedelta
tdi + pd.Timedelta('10 days')
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 0d6fc735f3025..2a6249bef112b 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -2338,7 +2338,7 @@ Infer the ambiguous times
.. ipython:: python
rng_hourly_eastern = rng_hourly.tz_localize('US/Eastern', ambiguous='infer')
- rng_hourly_eastern.tolist()
+ rng_hourly_eastern.to_list()
In addition to 'infer', there are several other arguments supported. Passing
an array-like of bools or 0s/1s where True represents a DST hour and False a
@@ -2351,8 +2351,8 @@ constructor as well as ``tz_localize``.
.. ipython:: python
rng_hourly_dst = np.array([1, 1, 0, 0, 0])
- rng_hourly.tz_localize('US/Eastern', ambiguous=rng_hourly_dst).tolist()
- rng_hourly.tz_localize('US/Eastern', ambiguous='NaT').tolist()
+ rng_hourly.tz_localize('US/Eastern', ambiguous=rng_hourly_dst).to_list()
+ rng_hourly.tz_localize('US/Eastern', ambiguous='NaT').to_list()
didx = pd.DatetimeIndex(start='2014-08-01 09:00', freq='H',
periods=10, tz='US/Eastern')
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index a18c26f911f1d..8a7f6f3f707e1 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -1071,6 +1071,7 @@ Other API Changes
- :meth:`Index.hasnans` and :meth:`Series.hasnans` now always return a python boolean. Previously, a python or a numpy boolean could be returned, depending on circumstances (:issue:`23294`).
- The order of the arguments of :func:`DataFrame.to_html` and :func:`DataFrame.to_string` is rearranged to be consistent with each other. (:issue:`23614`)
- :meth:`CategoricalIndex.reindex` now raises a ``ValueError`` if the target index is non-unique and not equal to the current index. It previously only raised if the target index was not of a categorical dtype (:issue:`23963`).
+- :func:`Series.to_list` and :func:`Index.to_list` are now aliases of ``Series.tolist`` respectively ``Index.tolist`` (:issue:`8826`)
.. _whatsnew_0240.deprecations:
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index abadd64b441b4..54929845c4b30 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -309,7 +309,8 @@ class Categorical(ExtensionArray, PandasObject):
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
- _deprecations = frozenset(['labels'])
+ # tolist is not actually deprecated, just suppressed in the __dir__
+ _deprecations = frozenset(['labels', 'tolist'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
@@ -567,6 +568,8 @@ def tolist(self):
"""
return list(self)
+ to_list = tolist
+
@property
def base(self):
"""
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 1d2a0a2544dbc..47ba56eefc8e7 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1025,6 +1025,8 @@ def tolist(self):
else:
return self._values.tolist()
+ to_list = tolist
+
def __iter__(self):
"""
Return an iterator of the values.
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index fc5f6758f9e06..3d037af26b954 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -31,7 +31,7 @@
from pandas.core.dtypes.missing import array_equivalent, isna
from pandas.core import ops
-from pandas.core.accessor import CachedAccessor
+from pandas.core.accessor import CachedAccessor, DirNamesMixin
import pandas.core.algorithms as algos
from pandas.core.arrays import ExtensionArray
from pandas.core.base import IndexOpsMixin, PandasObject
@@ -202,6 +202,9 @@ class Index(IndexOpsMixin, PandasObject):
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
"""
+ # tolist is not actually deprecated, just suppressed in the __dir__
+ _deprecations = DirNamesMixin._deprecations | frozenset(['tolist'])
+
# To hand over control to subclasses
_join_precedence = 1
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 4f9465354a47b..f9c9c3ab81937 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -134,9 +134,10 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
"""
_metadata = ['name']
_accessors = {'dt', 'cat', 'str', 'sparse'}
+ # tolist is not actually deprecated, just suppressed in the __dir__
_deprecations = generic.NDFrame._deprecations | frozenset(
['asobject', 'reshape', 'get_value', 'set_value',
- 'from_csv', 'valid'])
+ 'from_csv', 'valid', 'tolist'])
# Override cache_readonly bc Series is mutable
hasnans = property(base.IndexOpsMixin.hasnans.func,
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index 47fafe2a900b4..9f0def034f976 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -1099,9 +1099,10 @@ class TestToIterable(object):
'method',
[
lambda x: x.tolist(),
+ lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
- ], ids=['tolist', 'list', 'iter'])
+ ], ids=['tolist', 'to_list', 'list', 'iter'])
@pytest.mark.parametrize('typ', [Series, Index])
def test_iterable(self, typ, method, dtype, rdtype):
# gh-10904
@@ -1122,9 +1123,10 @@ def test_iterable(self, typ, method, dtype, rdtype):
'method',
[
lambda x: x.tolist(),
+ lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
- ], ids=['tolist', 'list', 'iter'])
+ ], ids=['tolist', 'to_list', 'list', 'iter'])
@pytest.mark.parametrize('typ', [Series, Index])
def test_iterable_object_and_category(self, typ, method,
dtype, rdtype, obj):
@@ -1167,9 +1169,10 @@ def test_iterable_map(self, typ, dtype, rdtype):
'method',
[
lambda x: x.tolist(),
+ lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
- ], ids=['tolist', 'list', 'iter'])
+ ], ids=['tolist', 'to_list', 'list', 'iter'])
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp('1999-12-31'),
Timestamp('2000-12-31')])
| - [x] closes #8826
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/23398 | 2018-10-28T13:28:52Z | 2018-12-13T01:47:43Z | 2018-12-13T01:47:43Z | 2018-12-13T01:47:47Z |
DEPR: Deprecate FrozenNDArray | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 89acd1a14a412..58c4a9ad4e13d 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -930,6 +930,7 @@ Deprecations
- :func:`DatetimeIndex.shift` and :func:`PeriodIndex.shift` now accept ``periods`` argument instead of ``n`` for consistency with :func:`Index.shift` and :func:`Series.shift`. Using ``n`` throws a deprecation warning (:issue:`22458`, :issue:`22912`)
- The ``fastpath`` keyword of the different Index constructors is deprecated (:issue:`23110`).
- :meth:`Timestamp.tz_localize`, :meth:`DatetimeIndex.tz_localize`, and :meth:`Series.tz_localize` have deprecated the ``errors`` argument in favor of the ``nonexistent`` argument (:issue:`8917`)
+- The class ``FrozenNDArray`` has been deprecated. When unpickling, ``FrozenNDArray`` will be unpickled to ``np.ndarray`` once this class is removed (:issue:`9031`)
.. _whatsnew_0240.prior_deprecations:
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index 59c162251c58f..a8fd54e39091b 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -60,6 +60,17 @@ def load_reduce(self):
('pandas.core.arrays', 'SparseArray'),
# 15477
+ #
+ # TODO: When FrozenNDArray is removed, add
+ # the following lines for compat:
+ #
+ # ('pandas.core.base', 'FrozenNDArray'):
+ # ('numpy', 'ndarray'),
+ # ('pandas.core.indexes.frozen', 'FrozenNDArray'):
+ # ('numpy', 'ndarray'),
+ #
+ # Afterwards, remove the current entry
+ # for `pandas.core.base.FrozenNDArray`.
('pandas.core.base', 'FrozenNDArray'):
('pandas.core.indexes.frozen', 'FrozenNDArray'),
('pandas.core.base', 'FrozenList'):
diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py
index 289970aaf3a82..4f782e22c2370 100644
--- a/pandas/core/indexes/frozen.py
+++ b/pandas/core/indexes/frozen.py
@@ -8,6 +8,7 @@
"""
+import warnings
import numpy as np
from pandas.core.base import PandasObject
from pandas.util._decorators import deprecate_kwarg
@@ -86,6 +87,10 @@ class FrozenNDArray(PandasObject, np.ndarray):
# no __array_finalize__ for now because no metadata
def __new__(cls, data, dtype=None, copy=False):
+ warnings.warn("\nFrozenNDArray is deprecated and will be removed in a "
+ "future version.\nPlease use `numpy.ndarray` instead.\n",
+ FutureWarning, stacklevel=2)
+
if copy is None:
copy = not isinstance(data, FrozenNDArray)
res = np.array(data, dtype=dtype, copy=copy).view(cls)
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 79ac32d2f6a0b..24e7d9bebae3e 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -40,8 +40,7 @@
Index, ensure_index,
InvalidIndexError,
_index_shared_docs)
-from pandas.core.indexes.frozen import (
- FrozenNDArray, FrozenList, _ensure_frozen)
+from pandas.core.indexes.frozen import FrozenList, _ensure_frozen
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
@@ -1655,7 +1654,7 @@ def _assert_take_fillable(self, values, indices, allow_fill=True,
for new_label in taken:
label_values = new_label.values()
label_values[mask] = na_value
- masked.append(FrozenNDArray(label_values))
+ masked.append(np.asarray(label_values))
taken = masked
else:
taken = [lab.take(indices) for lab in self.labels]
diff --git a/pandas/tests/indexes/test_frozen.py b/pandas/tests/indexes/test_frozen.py
index bc3e87a4622a7..e62329dec9846 100644
--- a/pandas/tests/indexes/test_frozen.py
+++ b/pandas/tests/indexes/test_frozen.py
@@ -1,3 +1,4 @@
+import warnings
import numpy as np
from pandas.compat import u
@@ -34,13 +35,23 @@ def test_inplace(self):
class TestFrozenNDArray(CheckImmutable, CheckStringMixin):
mutable_methods = ('put', 'itemset', 'fill')
- unicode_container = FrozenNDArray([u("\u05d0"), u("\u05d1"), "c"])
- def setup_method(self, method):
+ def setup_method(self, _):
self.lst = [3, 5, 7, -2]
- self.container = FrozenNDArray(self.lst)
self.klass = FrozenNDArray
+ with warnings.catch_warnings(record=True):
+ warnings.simplefilter("ignore", FutureWarning)
+
+ self.container = FrozenNDArray(self.lst)
+ self.unicode_container = FrozenNDArray(
+ [u("\u05d0"), u("\u05d1"), "c"])
+
+ def test_constructor_warns(self):
+ # see gh-9031
+ with tm.assert_produces_warning(FutureWarning):
+ FrozenNDArray([1, 2, 3])
+
def test_shallow_copying(self):
original = self.container.copy()
assert isinstance(self.container.view(), FrozenNDArray)
| Rationale can be found [here](https://github.com/pandas-dev/pandas/issues/9031#issuecomment-306658765).
Closes #9031.
xref #14565 (potentially can close this given how central `FrozenNDArray` was to discussion) | https://api.github.com/repos/pandas-dev/pandas/pulls/23396 | 2018-10-28T08:23:41Z | 2018-10-30T12:29:32Z | 2018-10-30T12:29:31Z | 2018-10-30T16:52:04Z |
DOC: Remove Series.sortlevel from api.rst | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 6e8eb83577c46..8f0f5fa7610eb 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -515,7 +515,6 @@ Reshaping, sorting
Series.repeat
Series.squeeze
Series.view
- Series.sortlevel
Combining / joining / merging
| https://travis-ci.org/pandas-dev/pandas/jobs/447288079#L1918
Was dropped in #15099 but accidentally added back in #18202.
Follow-up to #23375. | https://api.github.com/repos/pandas-dev/pandas/pulls/23395 | 2018-10-28T07:46:12Z | 2018-10-28T13:34:44Z | 2018-10-28T13:34:44Z | 2018-10-28T18:45:29Z |
ENH: Add FrozenList.union and .difference | diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index fecc336049a40..0a896bac0f2d7 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -125,6 +125,16 @@ We could naturally group by either the ``A`` or ``B`` columns, or both:
grouped = df.groupby('A')
grouped = df.groupby(['A', 'B'])
+.. versionadded:: 0.24
+
+If we also have a MultiIndex on columns ``A`` and ``B``, we can group by all
+but the specified columns
+
+.. ipython:: python
+
+ df2 = df.set_index(['A', 'B'])
+ grouped = df2.groupby(level=df2.index.names.difference(['B'])
+
These will split the DataFrame on its index (rows). We could also split by the
columns:
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index c7820a8cb9de1..209ee6098c563 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -13,10 +13,9 @@ v0.24.0 (Month XX, 2018)
New features
~~~~~~~~~~~~
- :func:`merge` now directly allows merge between objects of type ``DataFrame`` and named ``Series``, without the need to convert the ``Series`` object into a ``DataFrame`` beforehand (:issue:`21220`)
-
-
- ``ExcelWriter`` now accepts ``mode`` as a keyword argument, enabling append to existing workbooks when using the ``openpyxl`` engine (:issue:`3441`)
-
+- ``FrozenList`` has gained the ``.union()`` and ``.difference()`` methods. This functionality greatly simplifies groupby's that rely on explicitly excluding certain columns. See :ref:`Splitting an object into groups
+<groupby.split>` for more information (:issue:`15475`, :issue:`15506`)
- :func:`DataFrame.to_parquet` now accepts ``index`` as an argument, allowing
the user to override the engine's default behavior to include or omit the
dataframe's indexes from the resulting Parquet file. (:issue:`20768`)
diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py
index 4f782e22c2370..3ac4a2bf31a7e 100644
--- a/pandas/core/indexes/frozen.py
+++ b/pandas/core/indexes/frozen.py
@@ -23,15 +23,47 @@ class FrozenList(PandasObject, list):
because it's technically non-hashable, will be used
for lookups, appropriately, etc.
"""
- # Sidenote: This has to be of type list, otherwise it messes up PyTables
- # typechecks
+ # Side note: This has to be of type list. Otherwise,
+ # it messes up PyTables type checks.
- def __add__(self, other):
+ def union(self, other):
+ """
+ Returns a FrozenList with other concatenated to the end of self.
+
+ Parameters
+ ----------
+ other : array-like
+ The array-like whose elements we are concatenating.
+
+ Returns
+ -------
+ diff : FrozenList
+ The collection difference between self and other.
+ """
if isinstance(other, tuple):
other = list(other)
- return self.__class__(super(FrozenList, self).__add__(other))
+ return type(self)(super(FrozenList, self).__add__(other))
+
+ def difference(self, other):
+ """
+ Returns a FrozenList with elements from other removed from self.
+
+ Parameters
+ ----------
+ other : array-like
+ The array-like whose elements we are removing self.
+
+ Returns
+ -------
+ diff : FrozenList
+ The collection difference between self and other.
+ """
+ other = set(other)
+ temp = [x for x in self if x not in other]
+ return type(self)(temp)
- __iadd__ = __add__
+ # TODO: Consider deprecating these in favor of `union` (xref gh-15506)
+ __add__ = __iadd__ = union
# Python 2 compat
def __getslice__(self, i, j):
diff --git a/pandas/tests/indexes/test_frozen.py b/pandas/tests/indexes/test_frozen.py
index e62329dec9846..db9f875b77b8a 100644
--- a/pandas/tests/indexes/test_frozen.py
+++ b/pandas/tests/indexes/test_frozen.py
@@ -11,7 +11,7 @@ class TestFrozenList(CheckImmutable, CheckStringMixin):
mutable_methods = ('extend', 'pop', 'remove', 'insert')
unicode_container = FrozenList([u("\u05d0"), u("\u05d1"), "c"])
- def setup_method(self, method):
+ def setup_method(self, _):
self.lst = [1, 2, 3, 4, 5]
self.container = FrozenList(self.lst)
self.klass = FrozenList
@@ -25,13 +25,30 @@ def test_add(self):
expected = FrozenList([1, 2, 3] + self.lst)
self.check_result(result, expected)
- def test_inplace(self):
+ def test_iadd(self):
q = r = self.container
+
q += [5]
self.check_result(q, self.lst + [5])
- # other shouldn't be mutated
+
+ # Other shouldn't be mutated.
self.check_result(r, self.lst)
+ def test_union(self):
+ result = self.container.union((1, 2, 3))
+ expected = FrozenList(self.lst + [1, 2, 3])
+ self.check_result(result, expected)
+
+ def test_difference(self):
+ result = self.container.difference([2])
+ expected = FrozenList([1, 3, 4, 5])
+ self.check_result(result, expected)
+
+ def test_difference_dupe(self):
+ result = FrozenList([1, 2, 3, 2]).difference([2])
+ expected = FrozenList([1, 3])
+ self.check_result(result, expected)
+
class TestFrozenNDArray(CheckImmutable, CheckStringMixin):
mutable_methods = ('put', 'itemset', 'fill')
| Re-attempt of #15506.
Hopefully no doc slowdowns on Travis this time 🙏 ! (xref #15559)
Closes #15475.
No deprecation of `__add__` and `__iadd__` because we rely on overloading `+` internally when operating on array-likes (in general) in some cases (e.g. [here](https://github.com/pandas-dev/pandas/blob/62a15fa4071/pandas/core/reshape/melt.py#L82)). In some cases, that array-like will be `FrozenList`. | https://api.github.com/repos/pandas-dev/pandas/pulls/23394 | 2018-10-28T06:47:55Z | 2018-11-03T13:56:53Z | 2018-11-03T13:56:52Z | 2018-11-05T10:27:01Z |
API: Disallow dtypes w/o frequency when casting | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 51c398518c153..3fcdb82e39964 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -942,6 +942,7 @@ Removal of prior version deprecations/changes
- Removal of the previously deprecated module ``pandas.core.datetools`` (:issue:`14105`, :issue:`14094`)
- Strings passed into :meth:`DataFrame.groupby` that refer to both column and index levels will raise a ``ValueError`` (:issue:`14432`)
- :meth:`Index.repeat` and :meth:`MultiIndex.repeat` have renamed the ``n`` argument to ``repeats`` (:issue:`14645`)
+- The ``Series`` constructor and ``.astype`` method will now raise a ``ValueError`` if timestamp dtypes are passed in without a frequency (e.g. ``np.datetime64``) for the ``dtype`` parameter (:issue:`15987`)
- Removal of the previously deprecated ``as_indexer`` keyword completely from ``str.match()`` (:issue:`22356`, :issue:`6581`)
- Removed the ``pandas.formats.style`` shim for :class:`pandas.io.formats.style.Styler` (:issue:`16059`)
- :func:`pandas.pnow`, :func:`pandas.match`, :func:`pandas.groupby`, :func:`pd.get_store`, ``pd.Expr``, and ``pd.Term`` have been removed (:issue:`15538`, :issue:`15940`)
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 46c8126f65fec..f8b7fb7d88ee0 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -3,7 +3,6 @@
from datetime import datetime, timedelta
import numpy as np
-import warnings
from pandas._libs import tslib, lib, tslibs
from pandas._libs.tslibs import iNaT, OutOfBoundsDatetime, Period
@@ -664,6 +663,11 @@ def astype_nansafe(arr, dtype, copy=True, skipna=False):
e.g. the item sizes don't align.
skipna: bool, default False
Whether or not we should skip NaN when casting as a string-type.
+
+ Raises
+ ------
+ ValueError
+ The dtype was a datetime /timedelta dtype, but it had no frequency.
"""
# dispatch on extension dtype if needed
@@ -745,12 +749,9 @@ def astype_nansafe(arr, dtype, copy=True, skipna=False):
return astype_nansafe(to_timedelta(arr).values, dtype, copy=copy)
if dtype.name in ("datetime64", "timedelta64"):
- msg = ("Passing in '{dtype}' dtype with no frequency is "
- "deprecated and will raise in a future version. "
+ msg = ("The '{dtype}' dtype has no frequency. "
"Please pass in '{dtype}[ns]' instead.")
- warnings.warn(msg.format(dtype=dtype.name),
- FutureWarning, stacklevel=5)
- dtype = np.dtype(dtype.name + "[ns]")
+ raise ValueError(msg.format(dtype=dtype.name))
if copy or is_object_dtype(arr) or is_object_dtype(dtype):
# Explicit copy, or required since NumPy can't view from / to object.
@@ -1019,16 +1020,14 @@ def maybe_cast_to_datetime(value, dtype, errors='raise'):
if is_datetime64 or is_datetime64tz or is_timedelta64:
- # force the dtype if needed
- msg = ("Passing in '{dtype}' dtype with no frequency is "
- "deprecated and will raise in a future version. "
+ # Force the dtype if needed.
+ msg = ("The '{dtype}' dtype has no frequency. "
"Please pass in '{dtype}[ns]' instead.")
if is_datetime64 and not is_dtype_equal(dtype, _NS_DTYPE):
if dtype.name in ('datetime64', 'datetime64[ns]'):
if dtype.name == 'datetime64':
- warnings.warn(msg.format(dtype=dtype.name),
- FutureWarning, stacklevel=5)
+ raise ValueError(msg.format(dtype=dtype.name))
dtype = _NS_DTYPE
else:
raise TypeError("cannot convert datetimelike to "
@@ -1044,8 +1043,7 @@ def maybe_cast_to_datetime(value, dtype, errors='raise'):
elif is_timedelta64 and not is_dtype_equal(dtype, _TD_DTYPE):
if dtype.name in ('timedelta64', 'timedelta64[ns]'):
if dtype.name == 'timedelta64':
- warnings.warn(msg.format(dtype=dtype.name),
- FutureWarning, stacklevel=5)
+ raise ValueError(msg.format(dtype=dtype.name))
dtype = _TD_DTYPE
else:
raise TypeError("cannot convert timedeltalike to "
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 07b8eb930e8d0..bdd99dd485042 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -1192,32 +1192,26 @@ def test_constructor_cast_object(self, index):
exp = Series(index).astype(object)
tm.assert_series_equal(s, exp)
- def test_constructor_generic_timestamp_deprecated(self):
- # see gh-15524
-
- with tm.assert_produces_warning(FutureWarning):
- dtype = np.timedelta64
- s = Series([], dtype=dtype)
-
- assert s.empty
- assert s.dtype == 'm8[ns]'
-
- with tm.assert_produces_warning(FutureWarning):
- dtype = np.datetime64
- s = Series([], dtype=dtype)
+ @pytest.mark.parametrize("dtype", [
+ np.datetime64,
+ np.timedelta64,
+ ])
+ def test_constructor_generic_timestamp_no_frequency(self, dtype):
+ # see gh-15524, gh-15987
+ msg = "dtype has no frequency. Please pass in"
- assert s.empty
- assert s.dtype == 'M8[ns]'
+ with tm.assert_raises_regex(ValueError, msg):
+ Series([], dtype=dtype)
- # These timestamps have the wrong frequencies,
- # so an Exception should be raised now.
- msg = "cannot convert timedeltalike"
- with tm.assert_raises_regex(TypeError, msg):
- Series([], dtype='m8[ps]')
+ @pytest.mark.parametrize("dtype,msg", [
+ ("m8[ps]", "cannot convert timedeltalike"),
+ ("M8[ps]", "cannot convert datetimelike"),
+ ])
+ def test_constructor_generic_timestamp_bad_frequency(self, dtype, msg):
+ # see gh-15524, gh-15987
- msg = "cannot convert datetimelike"
with tm.assert_raises_regex(TypeError, msg):
- Series([], dtype='M8[ps]')
+ Series([], dtype=dtype)
@pytest.mark.parametrize('dtype', [None, 'uint8', 'category'])
def test_constructor_range_dtype(self, dtype):
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index b862f1588a547..c62531241369d 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -3,7 +3,6 @@
import string
import sys
-import warnings
from datetime import datetime, timedelta
import numpy as np
@@ -21,7 +20,7 @@
from pandas.compat import lrange, range, u
-class TestSeriesDtypes():
+class TestSeriesDtypes(object):
def test_dt64_series_astype_object(self):
dt64ser = Series(date_range('20130101', periods=3))
@@ -396,40 +395,30 @@ def test_astype_categoricaldtype_with_args(self):
with pytest.raises(TypeError):
s.astype(type_, categories=['a', 'b'], ordered=False)
- def test_astype_generic_timestamp_deprecated(self):
- # see gh-15524
+ @pytest.mark.parametrize("dtype", [
+ np.datetime64,
+ np.timedelta64,
+ ])
+ def test_astype_generic_timestamp_no_frequency(self, dtype):
+ # see gh-15524, gh-15987
data = [1]
+ s = Series(data)
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- s = Series(data)
- dtype = np.datetime64
- result = s.astype(dtype)
- expected = Series(data, dtype=dtype)
- tm.assert_series_equal(result, expected)
-
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- s = Series(data)
- dtype = np.timedelta64
- result = s.astype(dtype)
- expected = Series(data, dtype=dtype)
- tm.assert_series_equal(result, expected)
+ msg = "dtype has no frequency. Please pass in"
+ with tm.assert_raises_regex(ValueError, msg):
+ s.astype(dtype)
@pytest.mark.parametrize("dtype", np.typecodes['All'])
def test_astype_empty_constructor_equality(self, dtype):
# see gh-15524
- if dtype not in ('S', 'V'): # poor support (if any) currently
- with warnings.catch_warnings(record=True):
- if dtype in ('M', 'm'):
- # Generic timestamp dtypes ('M' and 'm') are deprecated,
- # but we test that already in series/test_constructors.py
- warnings.simplefilter("ignore", FutureWarning)
-
- init_empty = Series([], dtype=dtype)
- as_type_empty = Series([]).astype(dtype)
- tm.assert_series_equal(init_empty, as_type_empty)
+ if dtype not in (
+ "S", "V", # poor support (if any) currently
+ "M", "m" # Generic timestamps raise a ValueError. Already tested.
+ ):
+ init_empty = Series([], dtype=dtype)
+ as_type_empty = Series([]).astype(dtype)
+ tm.assert_series_equal(init_empty, as_type_empty)
def test_complex(self):
# see gh-4819: complex access for ndarray compat
| Previously deprecated for `Series` constructor and the `.astype` method. Now being enforced.
xref #15987. | https://api.github.com/repos/pandas-dev/pandas/pulls/23392 | 2018-10-28T00:46:00Z | 2018-10-28T13:34:22Z | 2018-10-28T13:34:22Z | 2018-10-28T23:56:34Z |
CLN: Remove some dtype methods from API | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 51c398518c153..863a29f68d3ee 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -954,6 +954,7 @@ Removal of prior version deprecations/changes
- :meth:`DataFrame.sortlevel` and :meth:`Series.sortlevel` have been removed (:issue:`15099`)
- :meth:`SparseSeries.to_dense` has dropped the ``sparse_only`` parameter (:issue:`14686`)
- :meth:`DataFrame.astype` and :meth:`Series.astype` have renamed the ``raise_on_error`` argument to ``errors`` (:issue:`14967`)
+- ``is_sequence``, ``is_any_int_dtype``, and ``is_floating_dtype`` have been removed from ``pandas.api.types`` (:issue:`16163`, :issue:`16189`)
.. _whatsnew_0240.performance:
diff --git a/pandas/core/dtypes/api.py b/pandas/core/dtypes/api.py
index 738e1ea9062f6..7bf3912b05b1d 100644
--- a/pandas/core/dtypes/api.py
+++ b/pandas/core/dtypes/api.py
@@ -1,7 +1,5 @@
# flake8: noqa
-import sys
-
from .common import (pandas_dtype,
is_dtype_equal,
is_extension_type,
@@ -59,24 +57,3 @@
is_list_like,
is_hashable,
is_named_tuple)
-
-
-# deprecated
-m = sys.modules['pandas.core.dtypes.api']
-
-for t in ['is_any_int_dtype', 'is_floating_dtype', 'is_sequence']:
-
- def outer(t=t):
-
- def wrapper(arr_or_dtype):
- import warnings
- import pandas
- warnings.warn("{t} is deprecated and will be "
- "removed in a future version".format(t=t),
- FutureWarning, stacklevel=3)
- return getattr(pandas.core.dtypes.common, t)(arr_or_dtype)
- return wrapper
-
- setattr(m, t, outer(t))
-
-del sys, m, t, outer
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index da26c2ef74b41..938392ebfc96d 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -796,11 +796,11 @@ def is_dtype_union_equal(source, target):
def is_any_int_dtype(arr_or_dtype):
"""Check whether the provided array or dtype is of an integer dtype.
- .. deprecated:: 0.20.0
-
In this function, timedelta64 instances are also considered "any-integer"
type objects and will return True.
+ This function is internal and should not be exposed in the public API.
+
Parameters
----------
arr_or_dtype : array-like
@@ -1560,6 +1560,8 @@ def is_float_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of a float dtype.
+ This function is internal and should not be exposed in the public API.
+
Parameters
----------
arr_or_dtype : array-like
diff --git a/pandas/tests/api/test_types.py b/pandas/tests/api/test_types.py
index 4ea501dacddf3..c36af4404e646 100644
--- a/pandas/tests/api/test_types.py
+++ b/pandas/tests/api/test_types.py
@@ -27,7 +27,7 @@ class TestTypes(Base):
'is_list_like', 'is_hashable', 'is_array_like',
'is_named_tuple',
'pandas_dtype', 'union_categoricals', 'infer_dtype']
- deprecated = ['is_any_int_dtype', 'is_floating_dtype', 'is_sequence']
+ deprecated = []
dtypes = ['CategoricalDtype', 'DatetimeTZDtype',
'PeriodDtype', 'IntervalDtype']
| Removes the following from the public API:
* `pandas.api.types.is_sequence`
* `pandas.api.types.is_any_int_dtype`
* `pandas.api.types.is_floating_dtype`
xref #16163.
xref #16189.
| https://api.github.com/repos/pandas-dev/pandas/pulls/23390 | 2018-10-27T23:52:34Z | 2018-10-28T13:41:18Z | 2018-10-28T13:41:18Z | 2018-10-28T18:47:20Z |
style: fix import format at pandas/core/computation | diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py
index 22c8b641cf974..f7f40a66af9c6 100644
--- a/pandas/core/computation/align.py
+++ b/pandas/core/computation/align.py
@@ -1,15 +1,16 @@
"""Core eval alignment algorithms
"""
-import warnings
from functools import partial, wraps
-from pandas.compat import zip, range
+import warnings
import numpy as np
+from pandas.compat import range, zip
+from pandas.errors import PerformanceWarning
+
import pandas as pd
from pandas import compat
-from pandas.errors import PerformanceWarning
import pandas.core.common as com
from pandas.core.computation.common import _result_type_many
diff --git a/pandas/core/computation/check.py b/pandas/core/computation/check.py
index 06f72bb36de5c..d2d5e018063ff 100644
--- a/pandas/core/computation/check.py
+++ b/pandas/core/computation/check.py
@@ -1,5 +1,5 @@
-import warnings
from distutils.version import LooseVersion
+import warnings
_NUMEXPR_INSTALLED = False
_MIN_NUMEXPR_VERSION = "2.6.1"
diff --git a/pandas/core/computation/common.py b/pandas/core/computation/common.py
index 105cc497a4207..e7eca04e413c5 100644
--- a/pandas/core/computation/common.py
+++ b/pandas/core/computation/common.py
@@ -1,7 +1,9 @@
import numpy as np
-import pandas as pd
+
from pandas.compat import reduce
+import pandas as pd
+
def _ensure_decoded(s):
""" if we have bytes, decode them to unicode """
diff --git a/pandas/core/computation/engines.py b/pandas/core/computation/engines.py
index 155ff554cf99c..bccd37131c81a 100644
--- a/pandas/core/computation/engines.py
+++ b/pandas/core/computation/engines.py
@@ -4,14 +4,14 @@
import abc
-from pandas import compat
from pandas.compat import map
-import pandas.io.formats.printing as printing
+
+from pandas import compat
from pandas.core.computation.align import _align, _reconstruct_object
from pandas.core.computation.ops import (
- UndefinedVariableError,
- _mathops, _reductions)
+ UndefinedVariableError, _mathops, _reductions)
+import pandas.io.formats.printing as printing
_ne_builtins = frozenset(_mathops + _reductions)
diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py
index 7025f3000eb5f..4b9ba02ed85a4 100644
--- a/pandas/core/computation/eval.py
+++ b/pandas/core/computation/eval.py
@@ -3,14 +3,17 @@
"""Top level ``eval`` module.
"""
-import warnings
import tokenize
-from pandas.io.formats.printing import pprint_thing
-from pandas.core.computation.scope import _ensure_scope
+import warnings
+
from pandas.compat import string_types
-from pandas.core.computation.engines import _engines
from pandas.util._validators import validate_bool_kwarg
+from pandas.core.computation.engines import _engines
+from pandas.core.computation.scope import _ensure_scope
+
+from pandas.io.formats.printing import pprint_thing
+
def _check_engine(engine):
"""Make sure a valid engine is passed.
diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py
index b68b6970a89cc..9a44198ba3b86 100644
--- a/pandas/core/computation/expr.py
+++ b/pandas/core/computation/expr.py
@@ -2,25 +2,25 @@
"""
import ast
+from functools import partial
import tokenize
-from functools import partial
import numpy as np
+from pandas.compat import StringIO, lmap, reduce, string_types, zip
+
import pandas as pd
from pandas import compat
-from pandas.compat import StringIO, lmap, zip, reduce, string_types
-from pandas.core.base import StringMixin
from pandas.core import common as com
-import pandas.io.formats.printing as printing
-from pandas.core.reshape.util import compose
+from pandas.core.base import StringMixin
from pandas.core.computation.ops import (
- _cmp_ops_syms, _bool_ops_syms,
- _arith_ops_syms, _unary_ops_syms, is_term)
-from pandas.core.computation.ops import _reductions, _mathops, _LOCAL_TAG
-from pandas.core.computation.ops import Op, BinOp, UnaryOp, Term, Constant, Div
-from pandas.core.computation.ops import UndefinedVariableError, FuncNode
+ _LOCAL_TAG, BinOp, Constant, Div, FuncNode, Op, Term, UnaryOp,
+ UndefinedVariableError, _arith_ops_syms, _bool_ops_syms, _cmp_ops_syms,
+ _mathops, _reductions, _unary_ops_syms, is_term)
from pandas.core.computation.scope import Scope
+from pandas.core.reshape.util import compose
+
+import pandas.io.formats.printing as printing
def tokenize_string(source):
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py
index c12056a3ee78c..d44fae624a91c 100644
--- a/pandas/core/computation/expressions.py
+++ b/pandas/core/computation/expressions.py
@@ -7,6 +7,7 @@
"""
import warnings
+
import numpy as np
import pandas.core.common as com
diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py
index ca0c4db4947c4..9e9f124352229 100644
--- a/pandas/core/computation/ops.py
+++ b/pandas/core/computation/ops.py
@@ -1,21 +1,23 @@
"""Operator classes for eval.
"""
-import operator as op
-from functools import partial
from datetime import datetime
+from functools import partial
+import operator as op
import numpy as np
+from pandas.compat import PY3, string_types, text_type
+
from pandas.core.dtypes.common import is_list_like, is_scalar
+
import pandas as pd
-from pandas.compat import PY3, string_types, text_type
-import pandas.core.common as com
-from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
from pandas.core.base import StringMixin
+import pandas.core.common as com
from pandas.core.computation.common import _ensure_decoded, _result_type_many
from pandas.core.computation.scope import _DEFAULT_GLOBALS
+from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
_reductions = 'sum', 'prod'
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py
index e08df3e340138..db409b215a78d 100644
--- a/pandas/core/computation/pytables.py
+++ b/pandas/core/computation/pytables.py
@@ -2,20 +2,24 @@
import ast
from functools import partial
+
import numpy as np
-import pandas as pd
+
+from pandas.compat import DeepChainMap, string_types, u
from pandas.core.dtypes.common import is_list_like
-import pandas.core.common as com
-from pandas.compat import u, string_types, DeepChainMap
+
+import pandas as pd
from pandas.core.base import StringMixin
-from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
+import pandas.core.common as com
from pandas.core.computation import expr, ops
-from pandas.core.computation.ops import is_term, UndefinedVariableError
-from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.common import _ensure_decoded
+from pandas.core.computation.expr import BaseExprVisitor
+from pandas.core.computation.ops import UndefinedVariableError, is_term
from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type
+from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
+
class Scope(expr.Scope):
__slots__ = 'queryables',
diff --git a/pandas/core/computation/scope.py b/pandas/core/computation/scope.py
index c3128be0f5599..33c5a1c2e0f0a 100644
--- a/pandas/core/computation/scope.py
+++ b/pandas/core/computation/scope.py
@@ -2,18 +2,18 @@
Module for scope operations
"""
-import sys
-import struct
-import inspect
import datetime
+import inspect
import itertools
import pprint
+import struct
+import sys
import numpy as np
-import pandas
+from pandas.compat import DeepChainMap, StringIO, map
+
import pandas as pd # noqa
-from pandas.compat import DeepChainMap, map, StringIO
from pandas.core.base import StringMixin
import pandas.core.computation as compu
@@ -48,7 +48,7 @@ def _raw_hex_id(obj):
_DEFAULT_GLOBALS = {
- 'Timestamp': pandas._libs.tslib.Timestamp,
+ 'Timestamp': pd._libs.tslib.Timestamp,
'datetime': datetime.datetime,
'True': True,
'False': False,
diff --git a/setup.cfg b/setup.cfg
index 26269c3318953..0988c83bdc93d 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -164,16 +164,6 @@ skip=
pandas/core/sparse/series.py,
pandas/core/sparse/frame.py,
pandas/core/sparse/scipy_sparse.py,
- pandas/core/computation/check.py,
- pandas/core/computation/ops.py,
- pandas/core/computation/pytables.py,
- pandas/core/computation/eval.py,
- pandas/core/computation/expressions.py,
- pandas/core/computation/common.py,
- pandas/core/computation/engines.py,
- pandas/core/computation/expr.py,
- pandas/core/computation/align.py,
- pandas/core/computation/scope.py,
pandas/tests/test_errors.py,
pandas/tests/test_base.py,
pandas/tests/test_register_accessor.py,
| - [ ] xref #23334
- [x] passes `git diff master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/23388 | 2018-10-27T23:27:24Z | 2018-11-01T00:04:45Z | 2018-11-01T00:04:45Z | 2018-11-01T00:04:49Z |
style: fix import format at pandas/core/reshape | diff --git a/pandas/core/reshape/api.py b/pandas/core/reshape/api.py
index 7ac1c0cb52fe3..3c76eef809c7a 100644
--- a/pandas/core/reshape/api.py
+++ b/pandas/core/reshape/api.py
@@ -1,8 +1,8 @@
# flake8: noqa
from pandas.core.reshape.concat import concat
-from pandas.core.reshape.melt import melt, lreshape, wide_to_long
+from pandas.core.reshape.melt import lreshape, melt, wide_to_long
+from pandas.core.reshape.merge import merge, merge_asof, merge_ordered
+from pandas.core.reshape.pivot import crosstab, pivot, pivot_table
from pandas.core.reshape.reshape import get_dummies
-from pandas.core.reshape.merge import merge, merge_ordered, merge_asof
-from pandas.core.reshape.pivot import pivot_table, pivot, crosstab
from pandas.core.reshape.tile import cut, qcut
diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py
index aa380d95e5f36..9f8564541a936 100644
--- a/pandas/core/reshape/concat.py
+++ b/pandas/core/reshape/concat.py
@@ -3,17 +3,19 @@
"""
import numpy as np
-from pandas import compat, DataFrame, Series, Index, MultiIndex
-from pandas.core.index import (_get_objs_combined_axis,
- ensure_index, _get_consensus_names,
- _all_indexes_same)
-from pandas.core.arrays.categorical import (_factorize_from_iterable,
- _factorize_from_iterables)
-from pandas.core.internals import concatenate_block_managers
+
+import pandas.core.dtypes.concat as _concat
+
+from pandas import DataFrame, Index, MultiIndex, Series, compat
from pandas.core import common as com
-import pandas.core.indexes.base as ibase
+from pandas.core.arrays.categorical import (
+ _factorize_from_iterable, _factorize_from_iterables)
from pandas.core.generic import NDFrame
-import pandas.core.dtypes.concat as _concat
+from pandas.core.index import (
+ _all_indexes_same, _get_consensus_names, _get_objs_combined_axis,
+ ensure_index)
+import pandas.core.indexes.base as ibase
+from pandas.core.internals import concatenate_block_managers
# ---------------------------------------------------------------------
# Concatenate DataFrame objects
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index 26221143c0cdf..6596e055db1a8 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -1,21 +1,20 @@
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
+import re
+
import numpy as np
-from pandas.core.dtypes.common import is_list_like
-from pandas import compat
-from pandas.core.arrays import Categorical
+from pandas.util._decorators import Appender
+from pandas.core.dtypes.common import is_extension_type, is_list_like
from pandas.core.dtypes.generic import ABCMultiIndex
+from pandas.core.dtypes.missing import notna
+from pandas import compat
+from pandas.core.arrays import Categorical
from pandas.core.frame import _shared_docs
-from pandas.util._decorators import Appender
-
-import re
-from pandas.core.dtypes.missing import notna
-from pandas.core.dtypes.common import is_extension_type
-from pandas.core.tools.numeric import to_numeric
from pandas.core.reshape.concat import concat
+from pandas.core.tools.numeric import to_numeric
@Appender(_shared_docs['melt'] %
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 5d4a0c718499a..e09cf0a527ff9 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -3,49 +3,34 @@
"""
import copy
-import warnings
import string
+import warnings
import numpy as np
-from pandas.compat import range, lzip, zip, map, filter
-import pandas.compat as compat
-from pandas import (Categorical, DataFrame,
- Index, MultiIndex, Timedelta, Series)
-from pandas.core.arrays.categorical import _recode_for_categories
-from pandas.core.frame import _merge_doc
-from pandas.core.dtypes.common import (
- is_datetime64tz_dtype,
- is_datetime64_dtype,
- needs_i8_conversion,
- is_int64_dtype,
- is_array_like,
- is_categorical_dtype,
- is_integer_dtype,
- is_float_dtype,
- is_number,
- is_numeric_dtype,
- is_integer,
- is_int_or_datetime_dtype,
- is_dtype_equal,
- is_bool,
- is_bool_dtype,
- is_list_like,
- is_datetimelike,
- ensure_int64,
- ensure_float64,
- ensure_object)
-from pandas.core.dtypes.missing import na_value_for_dtype, isnull
-from pandas.core.internals import (items_overlap_with_suffix,
- concatenate_block_managers)
+from pandas._libs import hashtable as libhashtable, join as libjoin, lib
+import pandas.compat as compat
+from pandas.compat import filter, lzip, map, range, zip
+from pandas.errors import MergeError
from pandas.util._decorators import Appender, Substitution
-from pandas.core.sorting import is_int64_overflow_possible
+from pandas.core.dtypes.common import (
+ ensure_float64, ensure_int64, ensure_object, is_array_like, is_bool,
+ is_bool_dtype, is_categorical_dtype, is_datetime64_dtype,
+ is_datetime64tz_dtype, is_datetimelike, is_dtype_equal, is_float_dtype,
+ is_int64_dtype, is_int_or_datetime_dtype, is_integer, is_integer_dtype,
+ is_list_like, is_number, is_numeric_dtype, needs_i8_conversion)
+from pandas.core.dtypes.missing import isnull, na_value_for_dtype
+
+from pandas import Categorical, DataFrame, Index, MultiIndex, Series, Timedelta
import pandas.core.algorithms as algos
-import pandas.core.sorting as sorting
+from pandas.core.arrays.categorical import _recode_for_categories
import pandas.core.common as com
-from pandas._libs import hashtable as libhashtable, join as libjoin, lib
-from pandas.errors import MergeError
+from pandas.core.frame import _merge_doc
+from pandas.core.internals import (
+ concatenate_block_managers, items_overlap_with_suffix)
+import pandas.core.sorting as sorting
+from pandas.core.sorting import is_int64_overflow_possible
@Substitution('\nleft : DataFrame')
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index 22e591e776a22..ec4cdffc56435 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -1,28 +1,25 @@
# pylint: disable=E1103
+import numpy as np
+from pandas.compat import lrange, range, zip
+from pandas.util._decorators import Appender, Substitution
-from pandas.core.dtypes.common import (
- is_list_like, is_scalar, is_integer_dtype)
-from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
+from pandas.core.dtypes.common import is_integer_dtype, is_list_like, is_scalar
+from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
-from pandas.core.reshape.concat import concat
-from pandas.core.series import Series
-from pandas.core.groupby import Grouper
-from pandas.core.reshape.util import cartesian_product
-from pandas.core.index import Index, MultiIndex, _get_objs_combined_axis
-from pandas.compat import range, lrange, zip
from pandas import compat
import pandas.core.common as com
-from pandas.util._decorators import Appender, Substitution
-
from pandas.core.frame import _shared_docs
-# Note: We need to make sure `frame` is imported before `pivot`, otherwise
-# _shared_docs['pivot_table'] will not yet exist. TODO: Fix this dependency
-
-import numpy as np
+from pandas.core.groupby import Grouper
+from pandas.core.index import Index, MultiIndex, _get_objs_combined_axis
+from pandas.core.reshape.concat import concat
+from pandas.core.reshape.util import cartesian_product
+from pandas.core.series import Series
+# Note: We need to make sure `frame` is imported before `pivot`, otherwise
+# _shared_docs['pivot_table'] will not yet exist. TODO: Fix this dependency
@Substitution('\ndata : DataFrame')
@Appender(_shared_docs['pivot_table'], indents=1)
def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 03b77f0e787f0..d3b677a1df2a3 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -1,36 +1,31 @@
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
-from pandas.compat import range, text_type, zip, u, PY2
-from pandas import compat
from functools import partial
import itertools
import numpy as np
-from pandas.core.dtypes.common import (
- ensure_platform_int,
- is_list_like, is_bool_dtype,
- is_extension_array_dtype,
- needs_i8_conversion, is_sparse, is_object_dtype)
-from pandas.core.dtypes.cast import maybe_promote
-from pandas.core.dtypes.missing import notna
-
-from pandas.core.series import Series
-from pandas.core.frame import DataFrame
-
-from pandas.core.sparse.api import SparseDataFrame, SparseSeries
-from pandas.core.arrays import SparseArray
+from pandas._libs import algos as _algos, reshape as _reshape
from pandas._libs.sparse import IntIndex
+from pandas.compat import PY2, range, text_type, u, zip
-from pandas.core.arrays import Categorical
-from pandas.core.arrays.categorical import _factorize_from_iterable
-from pandas.core.sorting import (get_group_index, get_compressed_ids,
- compress_group_index, decons_obs_group_ids)
+from pandas.core.dtypes.cast import maybe_promote
+from pandas.core.dtypes.common import (
+ ensure_platform_int, is_bool_dtype, is_extension_array_dtype, is_list_like,
+ is_object_dtype, is_sparse, needs_i8_conversion)
+from pandas.core.dtypes.missing import notna
+from pandas import compat
import pandas.core.algorithms as algos
-from pandas._libs import algos as _algos, reshape as _reshape
-
+from pandas.core.arrays import Categorical, SparseArray
+from pandas.core.arrays.categorical import _factorize_from_iterable
+from pandas.core.frame import DataFrame
from pandas.core.index import Index, MultiIndex
+from pandas.core.series import Series
+from pandas.core.sorting import (
+ compress_group_index, decons_obs_group_ids, get_compressed_ids,
+ get_group_index)
+from pandas.core.sparse.api import SparseDataFrame, SparseSeries
class _Unstacker(object):
diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py
index 031c94c06d3c8..4a863372eea13 100644
--- a/pandas/core/reshape/tile.py
+++ b/pandas/core/reshape/tile.py
@@ -3,25 +3,21 @@
"""
from functools import partial
-from pandas.core.dtypes.missing import isna
+import numpy as np
+
+from pandas._libs.lib import infer_dtype
+
from pandas.core.dtypes.common import (
- is_integer,
- is_scalar,
- is_categorical_dtype,
- is_datetime64_dtype,
- is_timedelta64_dtype,
- is_datetime64tz_dtype,
- is_datetime_or_timedelta_dtype,
- ensure_int64)
+ ensure_int64, is_categorical_dtype, is_datetime64_dtype,
+ is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_integer,
+ is_scalar, is_timedelta64_dtype)
+from pandas.core.dtypes.missing import isna
+from pandas import (
+ Categorical, Index, Interval, IntervalIndex, Series, Timedelta, Timestamp,
+ to_datetime, to_timedelta)
import pandas.core.algorithms as algos
import pandas.core.nanops as nanops
-from pandas._libs.lib import infer_dtype
-from pandas import (to_timedelta, to_datetime,
- Categorical, Timestamp, Timedelta,
- Series, Index, Interval, IntervalIndex)
-
-import numpy as np
def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
diff --git a/pandas/core/reshape/util.py b/pandas/core/reshape/util.py
index 1c2033d90cd8a..07f7272398777 100644
--- a/pandas/core/reshape/util.py
+++ b/pandas/core/reshape/util.py
@@ -1,8 +1,9 @@
import numpy as np
+from pandas.compat import reduce
+
from pandas.core.dtypes.common import is_list_like
-from pandas.compat import reduce
from pandas.core import common as com
diff --git a/setup.cfg b/setup.cfg
index 26269c3318953..bbf12872ffdb9 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -152,14 +152,6 @@ skip=
pandas/core/internals/concat.py,
pandas/core/internals/managers.py,
pandas/core/internals/blocks.py,
- pandas/core/reshape/concat.py,
- pandas/core/reshape/tile.py,
- pandas/core/reshape/melt.py,
- pandas/core/reshape/api.py,
- pandas/core/reshape/util.py,
- pandas/core/reshape/merge.py,
- pandas/core/reshape/reshape.py,
- pandas/core/reshape/pivot.py,
pandas/core/sparse/api.py,
pandas/core/sparse/series.py,
pandas/core/sparse/frame.py,
| - [ ] xref #23334
- [x] passes `git diff master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/23387 | 2018-10-27T23:16:56Z | 2018-11-01T01:10:13Z | 2018-11-01T01:10:13Z | 2018-11-01T01:10:16Z |
CLN: Cleanup toplevel namespace shims | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 51c398518c153..3841b3c924724 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -943,6 +943,7 @@ Removal of prior version deprecations/changes
- Strings passed into :meth:`DataFrame.groupby` that refer to both column and index levels will raise a ``ValueError`` (:issue:`14432`)
- :meth:`Index.repeat` and :meth:`MultiIndex.repeat` have renamed the ``n`` argument to ``repeats`` (:issue:`14645`)
- Removal of the previously deprecated ``as_indexer`` keyword completely from ``str.match()`` (:issue:`22356`, :issue:`6581`)
+- The modules ``pandas.types``, ``pandas.computation``, and ``pandas.util.decorators`` have been removed (:issue:`16157`, :issue:`16250`)
- Removed the ``pandas.formats.style`` shim for :class:`pandas.io.formats.style.Styler` (:issue:`16059`)
- :func:`pandas.pnow`, :func:`pandas.match`, :func:`pandas.groupby`, :func:`pd.get_store`, ``pd.Expr``, and ``pd.Term`` have been removed (:issue:`15538`, :issue:`15940`)
- :meth:`Categorical.searchsorted` and :meth:`Series.searchsorted` have renamed the ``v`` argument to ``value`` (:issue:`14645`)
diff --git a/pandas/computation/__init__.py b/pandas/computation/__init__.py
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/pandas/computation/expressions.py b/pandas/computation/expressions.py
deleted file mode 100644
index d194cd2404c9d..0000000000000
--- a/pandas/computation/expressions.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import warnings
-
-
-def set_use_numexpr(v=True):
- """
- .. deprecated:: 0.20.0
- Use ``pandas.set_option('compute.use_numexpr', v)`` instead.
- """
- warnings.warn("pandas.computation.expressions.set_use_numexpr is "
- "deprecated and will be removed in a future version.\n"
- "you can toggle usage of numexpr via "
- "pandas.get_option('compute.use_numexpr')",
- FutureWarning, stacklevel=2)
- from pandas import set_option
- set_option('compute.use_numexpr', v)
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index bb544d2ee81fd..fef87c9e6c4c2 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -135,20 +135,6 @@ def test_TimeGrouper(self):
pd.TimeGrouper(freq='D')
-class TestTypes(object):
-
- def test_deprecation_access_func(self):
- with tm.assert_produces_warning(
- FutureWarning, check_stacklevel=False):
- from pandas.types.concat import union_categoricals
- c1 = pd.Categorical(list('aabc'))
- c2 = pd.Categorical(list('abcd'))
- union_categoricals(
- [c1, c2],
- sort_categories=True,
- ignore_order=True)
-
-
class TestCDateRange(object):
def test_deprecation_cdaterange(self):
diff --git a/pandas/types/__init__.py b/pandas/types/__init__.py
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/pandas/types/common.py b/pandas/types/common.py
deleted file mode 100644
index a125c27d04596..0000000000000
--- a/pandas/types/common.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import warnings
-
-warnings.warn("pandas.types.common is deprecated and will be "
- "removed in a future version, import "
- "from pandas.api.types",
- DeprecationWarning, stacklevel=3)
-
-from pandas.core.dtypes.common import * # noqa
diff --git a/pandas/types/concat.py b/pandas/types/concat.py
deleted file mode 100644
index 477156b38d56d..0000000000000
--- a/pandas/types/concat.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import warnings
-
-
-def union_categoricals(to_union, sort_categories=False, ignore_order=False):
- warnings.warn("pandas.types.concat.union_categoricals is "
- "deprecated and will be removed in a future version.\n"
- "use pandas.api.types.union_categoricals",
- FutureWarning, stacklevel=2)
- from pandas.api.types import union_categoricals
- return union_categoricals(
- to_union, sort_categories=sort_categories, ignore_order=ignore_order)
diff --git a/pandas/util/decorators.py b/pandas/util/decorators.py
deleted file mode 100644
index 54bb834e829f3..0000000000000
--- a/pandas/util/decorators.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import warnings
-
-warnings.warn("pandas.util.decorators is deprecated and will be "
- "removed in a future version, import "
- "from pandas.util",
- DeprecationWarning, stacklevel=3)
-
-from pandas.util._decorators import * # noqa
| Removes the following:
* `pandas.types`
* `pandas.computation`
* `pandas.util.decorators`
xref #16157.
xref #16250. | https://api.github.com/repos/pandas-dev/pandas/pulls/23386 | 2018-10-27T23:05:57Z | 2018-10-28T13:40:23Z | 2018-10-28T13:40:23Z | 2018-10-28T18:46:06Z |
DOC: Update DateOffset intro in timeseries.rst | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index cc377f45c4b8d..7bb00f9b9aa7d 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -4,7 +4,7 @@
.. ipython:: python
:suppress:
- from datetime import datetime, timedelta, time
+ import datetime
import numpy as np
import pandas as pd
from pandas import offsets
@@ -32,7 +32,7 @@ Parsing time series information from various sources and formats
.. ipython:: python
- dti = pd.to_datetime(['1/1/2018', np.datetime64('2018-01-01'), datetime(2018, 1, 1)])
+ dti = pd.to_datetime(['1/1/2018', np.datetime64('2018-01-01'), datetime.datetime(2018, 1, 1)])
dti
Generate sequences of fixed-frequency dates and time spans
@@ -69,7 +69,7 @@ Performing date and time arithmetic with absolute or relative time increments
saturday = friday + pd.Timedelta('1 day')
saturday.day_name()
# Add 1 business day (Friday --> Monday)
- monday = friday + pd.tseries.offsets.BDay()
+ monday = friday + pd.offsets.BDay()
monday.day_name()
pandas provides a relatively compact and self-contained set of tools for
@@ -110,12 +110,14 @@ However, :class:`Series` and :class:`DataFrame` can directly also support the ti
pd.Series(pd.date_range('2000', freq='D', periods=3))
-:class:`Series` and :class:`DataFrame` have extended data type support and functionality for ``datetime`` and ``timedelta``
-data when the time data is used as data itself. The ``Period`` and ``DateOffset`` data will be stored as ``object`` data.
+:class:`Series` and :class:`DataFrame` have extended data type support and functionality for ``datetime``, ``timedelta``
+and ``Period`` data when passed into those constructors. ``DateOffset``
+data however will be stored as ``object`` data.
.. ipython:: python
pd.Series(pd.period_range('1/1/2011', freq='M', periods=3))
+ pd.Series([pd.DateOffset(1), pd.DateOffset(2)])
pd.Series(pd.date_range('1/1/2011', freq='M', periods=3))
Lastly, pandas represents null date times, time deltas, and time spans as ``NaT`` which
@@ -141,7 +143,7 @@ time.
.. ipython:: python
- pd.Timestamp(datetime(2012, 5, 1))
+ pd.Timestamp(datetime.datetime(2012, 5, 1))
pd.Timestamp('2012-05-01')
pd.Timestamp(2012, 5, 1)
@@ -400,7 +402,7 @@ To generate an index with timestamps, you can use either the ``DatetimeIndex`` o
.. ipython:: python
- dates = [datetime(2012, 5, 1), datetime(2012, 5, 2), datetime(2012, 5, 3)]
+ dates = [datetime.datetime(2012, 5, 1), datetime.datetime(2012, 5, 2), datetime.datetime(2012, 5, 3)]
# Note the frequency information
index = pd.DatetimeIndex(dates)
@@ -418,8 +420,8 @@ to create a ``DatetimeIndex``. The default frequency for ``date_range`` is a
.. ipython:: python
- start = datetime(2011, 1, 1)
- end = datetime(2012, 1, 1)
+ start = datetime.datetime(2011, 1, 1)
+ end = datetime.datetime(2012, 1, 1)
index = pd.date_range(start, end)
index
@@ -486,7 +488,7 @@ used if a custom frequency string is passed.
weekmask = 'Mon Wed Fri'
- holidays = [datetime(2011, 1, 5), datetime(2011, 3, 14)]
+ holidays = [datetime.datetime(2011, 1, 5), datetime.datetime(2011, 3, 14)]
pd.bdate_range(start, end, freq='C', weekmask=weekmask, holidays=holidays)
@@ -564,7 +566,7 @@ Dates and strings that parse to timestamps can be passed as indexing parameters:
ts['1/31/2011']
- ts[datetime(2011, 12, 25):]
+ ts[datetime.datetime(2011, 12, 25):]
ts['10/31/2011':'12/31/2011']
@@ -716,13 +718,13 @@ These ``Timestamp`` and ``datetime`` objects have exact ``hours, minutes,`` and
.. ipython:: python
- dft[datetime(2013, 1, 1):datetime(2013,2,28)]
+ dft[datetime.datetime(2013, 1, 1):datetime.datetime(2013,2,28)]
With no defaults.
.. ipython:: python
- dft[datetime(2013, 1, 1, 10, 12, 0):datetime(2013, 2, 28, 10, 12, 0)]
+ dft[datetime.datetime(2013, 1, 1, 10, 12, 0):datetime.datetime(2013, 2, 28, 10, 12, 0)]
Truncating & Fancy Indexing
@@ -823,120 +825,119 @@ on :ref:`.dt accessors<basics.dt_accessors>`.
DateOffset Objects
------------------
-In the preceding examples, we created ``DatetimeIndex`` objects at various
-frequencies by passing in :ref:`frequency strings <timeseries.offset_aliases>`
-like 'M', 'W', and 'BM' to the ``freq`` keyword. Under the hood, these frequency
-strings are being translated into an instance of :class:`DateOffset`,
-which represents a regular frequency increment. Specific offset logic like
-"month", "business day", or "one hour" is represented in its various subclasses.
+In the preceding examples, frequency strings (e.g. ``'D'``) were used to specify
+a frequency that defined:
-.. csv-table::
- :header: "Class name", "Description"
- :widths: 15, 65
-
- DateOffset, "Generic offset class, defaults to 1 calendar day"
- BDay, "business day (weekday)"
- CDay, "custom business day"
- Week, "one week, optionally anchored on a day of the week"
- WeekOfMonth, "the x-th day of the y-th week of each month"
- LastWeekOfMonth, "the x-th day of the last week of each month"
- MonthEnd, "calendar month end"
- MonthBegin, "calendar month begin"
- BMonthEnd, "business month end"
- BMonthBegin, "business month begin"
- CBMonthEnd, "custom business month end"
- CBMonthBegin, "custom business month begin"
- SemiMonthEnd, "15th (or other day_of_month) and calendar month end"
- SemiMonthBegin, "15th (or other day_of_month) and calendar month begin"
- QuarterEnd, "calendar quarter end"
- QuarterBegin, "calendar quarter begin"
- BQuarterEnd, "business quarter end"
- BQuarterBegin, "business quarter begin"
- FY5253Quarter, "retail (aka 52-53 week) quarter"
- YearEnd, "calendar year end"
- YearBegin, "calendar year begin"
- BYearEnd, "business year end"
- BYearBegin, "business year begin"
- FY5253, "retail (aka 52-53 week) year"
- BusinessHour, "business hour"
- CustomBusinessHour, "custom business hour"
- Hour, "one hour"
- Minute, "one minute"
- Second, "one second"
- Milli, "one millisecond"
- Micro, "one microsecond"
- Nano, "one nanosecond"
-
-The basic ``DateOffset`` takes the same arguments as
-``dateutil.relativedelta``, which works as follows:
-
-.. ipython:: python
+* how the date times in :class:`DatetimeIndex` were spaced when using :meth:`date_range`
+* the frequency of a :class:`Period` or :class:`PeriodIndex`
- d = datetime(2008, 8, 18, 9, 0)
- d + relativedelta(months=4, days=5)
+These frequency strings map to a :class:`DateOffset` object and its subclasses. A :class:`DateOffset`
+is similar to a :class:`Timedelta` that represents a duration of time but follows specific calendar duration rules.
+For example, a :class:`Timedelta` day will always increment ``datetimes`` by 24 hours, while a :class:`DateOffset` day
+will increment ``datetimes`` to the same time the next day whether a day represents 23, 24 or 25 hours due to daylight
+savings time. However, all :class:`DateOffset` subclasses that are an hour or smaller
+(``Hour``, ``Minute``, ``Second``, ``Milli``, ``Micro``, ``Nano``) behave like
+:class:`Timedelta` and respect absolute time.
-We could have done the same thing with ``DateOffset``:
+The basic :class:`DateOffset` acts similar to ``dateutil.relativedelta`` (`relativedelta documentation`_)
+that shifts a date time by the corresponding calendar duration specified. The
+arithmetic operator (``+``) or the ``apply`` method can be used to perform the shift.
.. ipython:: python
- from pandas.tseries.offsets import *
- d + DateOffset(months=4, days=5)
-
-The key features of a ``DateOffset`` object are:
-
-* It can be added / subtracted to/from a datetime object to obtain a
- shifted date.
-* It can be multiplied by an integer (positive or negative) so that the
- increment will be applied multiple times.
-* It has :meth:`~pandas.DateOffset.rollforward` and
- :meth:`~pandas.DateOffset.rollback` methods for moving a date forward or
- backward to the next or previous "offset date".
-
-Subclasses of ``DateOffset`` define the ``apply`` function which dictates
-custom date increment logic, such as adding business days:
-
-.. code-block:: python
-
- class BDay(DateOffset):
- """DateOffset increments between business days"""
- def apply(self, other):
- ...
-
-.. ipython:: python
-
- d - 5 * BDay()
- d + BMonthEnd()
-
-The ``rollforward`` and ``rollback`` methods do exactly what you would expect:
-
-.. ipython:: python
-
- d
- offset = BMonthEnd()
- offset.rollforward(d)
- offset.rollback(d)
-
-It's definitely worth exploring the ``pandas.tseries.offsets`` module and the
-various docstrings for the classes.
+ # This particular day contains a day light savings time transition
+ ts = pd.Timestamp('2016-10-30 00:00:00', tz='Europe/Helsinki')
+ # Respects absolute time
+ ts + pd.Timedelta(days=1)
+ # Respects calendar time
+ ts + pd.DateOffset(days=1)
+ friday = pd.Timestamp('2018-01-05')
+ friday.day_name()
+ # Add 2 business days (Friday --> Tuesday)
+ two_business_days = 2 * pd.offsets.BDay()
+ two_business_days.apply(friday)
+ friday + two_business_days
+ (friday + two_business_days).day_name()
+
+Most ``DateOffsets`` have associated frequencies strings, or offset aliases, that can be passed
+into ``freq`` keyword arguments. The available date offsets and associated frequency strings can be found below:
-These operations (``apply``, ``rollforward`` and ``rollback``) preserve time
-(hour, minute, etc) information by default. To reset time, use ``normalize``
-before or after applying the operation (depending on whether you want the
-time information included in the operation.
+.. csv-table::
+ :header: "Date Offset", "Frequency String", "Description"
+ :widths: 15, 15, 65
+
+ ``DateOffset``, None, "Generic offset class, defaults to 1 calendar day"
+ ``BDay`` or ``BusinessDay``, ``'B'``,"business day (weekday)"
+ ``CDay`` or ``CustomBusinessDay``, ``'C'``, "custom business day"
+ ``Week``, ``'W'``, "one week, optionally anchored on a day of the week"
+ ``WeekOfMonth``, ``'WOM'``, "the x-th day of the y-th week of each month"
+ ``LastWeekOfMonth``, ``'LWOM'``, "the x-th day of the last week of each month"
+ ``MonthEnd``, ``'M'``, "calendar month end"
+ ``MonthBegin``, ``'MS'``, "calendar month begin"
+ ``BMonthEnd`` or ``BusinessMonthEnd``, ``'BM'``, "business month end"
+ ``BMonthBegin`` or ``BusinessMonthBegin``, ``'BMS'``, "business month begin"
+ ``CBMonthEnd`` or ``CustomBusinessMonthEnd``, ``'CBM'``, "custom business month end"
+ ``CBMonthBegin`` or ``CustomBusinessMonthBegin``, ``'CBMS'``, "custom business month begin"
+ ``SemiMonthEnd``, ``'SM'``, "15th (or other day_of_month) and calendar month end"
+ ``SemiMonthBegin``, ``'SMS'``, "15th (or other day_of_month) and calendar month begin"
+ ``QuarterEnd``, ``'Q'``, "calendar quarter end"
+ ``QuarterBegin``, ``'QS'``, "calendar quarter begin"
+ ``BQuarterEnd``, ``'BQ``, "business quarter end"
+ ``BQuarterBegin``, ``'BQS'``, "business quarter begin"
+ ``FY5253Quarter``, ``'REQ'``, "retail (aka 52-53 week) quarter"
+ ``YearEnd``, ``'A'``, "calendar year end"
+ ``YearBegin``, ``'AS'`` or ``'BYS'``,"calendar year begin"
+ ``BYearEnd``, ``'BA'``, "business year end"
+ ``BYearBegin``, ``'BAS'``, "business year begin"
+ ``FY5253``, ``'RE'``, "retail (aka 52-53 week) year"
+ ``Easter``, None, "Easter holiday"
+ ``BusinessHour``, ``'BH'``, "business hour"
+ ``CustomBusinessHour``, ``'CBH'``, "custom business hour"
+ ``Day``, ``'D'``, "one absolute day"
+ ``Hour``, ``'H'``, "one hour"
+ ``Minute``, ``'T'`` or ``'min'``,"one minute"
+ ``Second``, ``'S'``, "one second"
+ ``Milli``, ``'L'`` or ``'ms'``, "one millisecond"
+ ``Micro``, ``'U'`` or ``'us'``, "one microsecond"
+ ``Nano``, ``'N'``, "one nanosecond"
+
+``DateOffsets`` additionally have :meth:`rollforward` and :meth:`rollback`
+methods for moving a date forward or backward respectively to a valid offset
+date relative to the offset. For example, business offsets will roll dates
+that land on the weekends (Saturday and Sunday) forward to Monday since
+business offsets operate on the weekdays.
+
+.. ipython:: python
+
+ ts = pd.Timestamp('2018-01-06 00:00:00')
+ ts.day_name()
+ # BusinessHour's valid offset dates are Monday through Friday
+ offset = pd.offsets.BusinessHour(start='09:00')
+ # Bring the date to the closest offset date (Monday)
+ offset.rollforward(ts)
+ # Date is brought to the closest offset date first and then the hour is added
+ ts + offset
+
+These operations preserve time (hour, minute, etc) information by default.
+To reset time to midnight, use :meth:`normalize` before or after applying
+the operation (depending on whether you want the time information included
+in the operation).
.. ipython:: python
ts = pd.Timestamp('2014-01-01 09:00')
- day = Day()
+ day = pd.offsets.Day()
day.apply(ts)
day.apply(ts).normalize()
ts = pd.Timestamp('2014-01-01 22:00')
- hour = Hour()
+ hour = pd.offsets.Hour()
hour.apply(ts)
hour.apply(ts).normalize()
hour.apply(pd.Timestamp("2014-01-01 23:30")).normalize()
+.. _relativedelta documentation: https://dateutil.readthedocs.io/en/stable/relativedelta.html
+
.. _timeseries.dayvscalendarday:
Day vs. CalendarDay
@@ -968,27 +969,28 @@ particular day of the week:
.. ipython:: python
+ d = datetime.datetime(2008, 8, 18, 9, 0)
d
- d + Week()
- d + Week(weekday=4)
- (d + Week(weekday=4)).weekday()
+ d + pd.offsets.Week()
+ d + pd.offsets.Week(weekday=4)
+ (d + pd.offsets.Week(weekday=4)).weekday()
- d - Week()
+ d - pd.offsets.Week()
The ``normalize`` option will be effective for addition and subtraction.
.. ipython:: python
- d + Week(normalize=True)
- d - Week(normalize=True)
+ d + pd.offsets.Week(normalize=True)
+ d - pd.offsets.Week(normalize=True)
Another example is parameterizing ``YearEnd`` with the specific ending month:
.. ipython:: python
- d + YearEnd()
- d + YearEnd(month=6)
+ d + pd.offsets.YearEnd()
+ d + pd.offsets.YearEnd(month=6)
.. _timeseries.offsetseries:
@@ -1004,9 +1006,9 @@ apply the offset to each element.
rng = pd.date_range('2012-01-01', '2012-01-03')
s = pd.Series(rng)
rng
- rng + DateOffset(months=2)
- s + DateOffset(months=2)
- s - DateOffset(months=2)
+ rng + pd.DateOffset(months=2)
+ s + pd.DateOffset(months=2)
+ s - pd.DateOffset(months=2)
If the offset class maps directly to a ``Timedelta`` (``Day``, ``Hour``,
``Minute``, ``Second``, ``Micro``, ``Milli``, ``Nano``) it can be
@@ -1015,10 +1017,10 @@ used exactly like a ``Timedelta`` - see the
.. ipython:: python
- s - Day(2)
+ s - pd.offsets.Day(2)
td = s - pd.Series(pd.date_range('2011-12-29', '2011-12-31'))
td
- td + Minute(15)
+ td + pd.offsets.Minute(15)
Note that some offsets (such as ``BQuarterEnd``) do not have a
vectorized implementation. They can still be used but may
@@ -1027,7 +1029,7 @@ calculate significantly slower and will show a ``PerformanceWarning``
.. ipython:: python
:okwarning:
- rng + BQuarterEnd()
+ rng + pd.offsets.BQuarterEnd()
.. _timeseries.custombusinessdays:
@@ -1049,9 +1051,9 @@ As an interesting example, let's look at Egypt where a Friday-Saturday weekend i
# They also observe International Workers' Day so let's
# add that for a couple of years
- holidays = ['2012-05-01', datetime(2013, 5, 1), np.datetime64('2014-05-01')]
+ holidays = ['2012-05-01', datetime.datetime(2013, 5, 1), np.datetime64('2014-05-01')]
bday_egypt = CustomBusinessDay(holidays=holidays, weekmask=weekmask_egypt)
- dt = datetime(2013, 4, 30)
+ dt = datetime.datetime(2013, 4, 30)
dt + 2 * bday_egypt
Let's map to the weekday names:
@@ -1072,7 +1074,7 @@ Holiday calendars can be used to provide the list of holidays. See the
bday_us = CustomBusinessDay(calendar=USFederalHolidayCalendar())
# Friday before MLK Day
- dt = datetime(2014, 1, 17)
+ dt = datetime.datetime(2014, 1, 17)
# Tuesday after MLK Day (Monday is skipped because it's a holiday)
dt + bday_us
@@ -1083,10 +1085,10 @@ in the usual way.
.. ipython:: python
from pandas.tseries.offsets import CustomBusinessMonthBegin
- bmth_us = CustomBusinessMonthBegin(calendar=USFederalHolidayCalendar())
+ bmth_us = pd.offsets.CustomBusinessMonthBegin(calendar=USFederalHolidayCalendar())
# Skip new years
- dt = datetime(2013, 12, 17)
+ dt = datetime.datetime(2013, 12, 17)
dt + bmth_us
# Define date index with custom offset
@@ -1111,13 +1113,13 @@ allowing to use specific start and end times.
By default, ``BusinessHour`` uses 9:00 - 17:00 as business hours.
Adding ``BusinessHour`` will increment ``Timestamp`` by hourly frequency.
-If target ``Timestamp`` is out of business hours, move to the next business hour
-then increment it. If the result exceeds the business hours end, the remaining
+If target ``Timestamp`` is out of business hours, move to the next business hour
+then increment it. If the result exceeds the business hours end, the remaining
hours are added to the next business day.
.. ipython:: python
- bh = BusinessHour()
+ bh = pd.offsets.BusinessHour()
bh
# 2014-08-01 is Friday
@@ -1134,19 +1136,19 @@ hours are added to the next business day.
pd.Timestamp('2014-08-01 16:30') + bh
# Adding 2 business hours
- pd.Timestamp('2014-08-01 10:00') + BusinessHour(2)
+ pd.Timestamp('2014-08-01 10:00') + pd.offsets.BusinessHour(2)
# Subtracting 3 business hours
- pd.Timestamp('2014-08-01 10:00') + BusinessHour(-3)
+ pd.Timestamp('2014-08-01 10:00') + pd.offsets.BusinessHour(-3)
-You can also specify ``start`` and ``end`` time by keywords. The argument must
-be a ``str`` with an ``hour:minute`` representation or a ``datetime.time``
-instance. Specifying seconds, microseconds and nanoseconds as business hour
+You can also specify ``start`` and ``end`` time by keywords. The argument must
+be a ``str`` with an ``hour:minute`` representation or a ``datetime.time``
+instance. Specifying seconds, microseconds and nanoseconds as business hour
results in ``ValueError``.
.. ipython:: python
- bh = BusinessHour(start='11:00', end=time(20, 0))
+ bh = pd.offsets.BusinessHour(start='11:00', end=datetime.time(20, 0))
bh
pd.Timestamp('2014-08-01 13:00') + bh
@@ -1159,7 +1161,7 @@ Valid business hours are distinguished by whether it started from valid ``Busine
.. ipython:: python
- bh = BusinessHour(start='17:00', end='09:00')
+ bh = pd.offsets.BusinessHour(start='17:00', end='09:00')
bh
pd.Timestamp('2014-08-01 17:00') + bh
@@ -1184,22 +1186,22 @@ under the default business hours (9:00 - 17:00), there is no gap (0 minutes) bet
.. ipython:: python
# This adjusts a Timestamp to business hour edge
- BusinessHour().rollback(pd.Timestamp('2014-08-02 15:00'))
- BusinessHour().rollforward(pd.Timestamp('2014-08-02 15:00'))
+ pd.offsets.BusinessHour().rollback(pd.Timestamp('2014-08-02 15:00'))
+ pd.offsets.BusinessHour().rollforward(pd.Timestamp('2014-08-02 15:00'))
# It is the same as BusinessHour().apply(pd.Timestamp('2014-08-01 17:00')).
# And it is the same as BusinessHour().apply(pd.Timestamp('2014-08-04 09:00'))
- BusinessHour().apply(pd.Timestamp('2014-08-02 15:00'))
+ pd.offsets.BusinessHour().apply(pd.Timestamp('2014-08-02 15:00'))
# BusinessDay results (for reference)
- BusinessHour().rollforward(pd.Timestamp('2014-08-02'))
+ pd.offsets.BusinessHour().rollforward(pd.Timestamp('2014-08-02'))
# It is the same as BusinessDay().apply(pd.Timestamp('2014-08-01'))
# The result is the same as rollworward because BusinessDay never overlap.
- BusinessHour().apply(pd.Timestamp('2014-08-02'))
+ pd.offsets.BusinessHour().apply(pd.Timestamp('2014-08-02'))
-``BusinessHour`` regards Saturday and Sunday as holidays. To use arbitrary
-holidays, you can use ``CustomBusinessHour`` offset, as explained in the
+``BusinessHour`` regards Saturday and Sunday as holidays. To use arbitrary
+holidays, you can use ``CustomBusinessHour`` offset, as explained in the
following subsection.
.. _timeseries.custombusinesshour:
@@ -1216,9 +1218,9 @@ as ``BusinessHour`` except that it skips specified custom holidays.
.. ipython:: python
from pandas.tseries.holiday import USFederalHolidayCalendar
- bhour_us = CustomBusinessHour(calendar=USFederalHolidayCalendar())
+ bhour_us = pd.offsets.CustomBusinessHour(calendar=USFederalHolidayCalendar())
# Friday before MLK Day
- dt = datetime(2014, 1, 17, 15)
+ dt = datetime.datetime(2014, 1, 17, 15)
dt + bhour_us
@@ -1229,7 +1231,7 @@ You can use keyword arguments supported by either ``BusinessHour`` and ``CustomB
.. ipython:: python
- bhour_mon = CustomBusinessHour(start='10:00', weekmask='Tue Wed Thu Fri')
+ bhour_mon = pd.offsets.CustomBusinessHour(start='10:00', weekmask='Tue Wed Thu Fri')
# Monday is skipped because it's a holiday, business hour starts from 10:00
dt + bhour_mon * 2
@@ -1285,7 +1287,7 @@ most functions:
pd.date_range(start, periods=5, freq='B')
- pd.date_range(start, periods=5, freq=BDay())
+ pd.date_range(start, periods=5, freq=pd.offsets.BDay())
You can combine together day and intraday offsets:
@@ -1352,39 +1354,39 @@ anchor point, and moved ``|n|-1`` additional steps forwards or backwards.
.. ipython:: python
- pd.Timestamp('2014-01-02') + MonthBegin(n=1)
- pd.Timestamp('2014-01-02') + MonthEnd(n=1)
+ pd.Timestamp('2014-01-02') + pd.offsets.MonthBegin(n=1)
+ pd.Timestamp('2014-01-02') + pd.offsets.MonthEnd(n=1)
- pd.Timestamp('2014-01-02') - MonthBegin(n=1)
- pd.Timestamp('2014-01-02') - MonthEnd(n=1)
+ pd.Timestamp('2014-01-02') - pd.offsets.MonthBegin(n=1)
+ pd.Timestamp('2014-01-02') - pd.offsets.MonthEnd(n=1)
- pd.Timestamp('2014-01-02') + MonthBegin(n=4)
- pd.Timestamp('2014-01-02') - MonthBegin(n=4)
+ pd.Timestamp('2014-01-02') + pd.offsets.MonthBegin(n=4)
+ pd.Timestamp('2014-01-02') - pd.offsets.MonthBegin(n=4)
If the given date *is* on an anchor point, it is moved ``|n|`` points forwards
or backwards.
.. ipython:: python
- pd.Timestamp('2014-01-01') + MonthBegin(n=1)
- pd.Timestamp('2014-01-31') + MonthEnd(n=1)
+ pd.Timestamp('2014-01-01') + pd.offsets.MonthBegin(n=1)
+ pd.Timestamp('2014-01-31') + pd.offsets.MonthEnd(n=1)
- pd.Timestamp('2014-01-01') - MonthBegin(n=1)
- pd.Timestamp('2014-01-31') - MonthEnd(n=1)
+ pd.Timestamp('2014-01-01') - pd.offsets.MonthBegin(n=1)
+ pd.Timestamp('2014-01-31') - pd.offsets.MonthEnd(n=1)
- pd.Timestamp('2014-01-01') + MonthBegin(n=4)
- pd.Timestamp('2014-01-31') - MonthBegin(n=4)
+ pd.Timestamp('2014-01-01') + pd.offsets.MonthBegin(n=4)
+ pd.Timestamp('2014-01-31') - pd.offsets.MonthBegin(n=4)
For the case when ``n=0``, the date is not moved if on an anchor point, otherwise
it is rolled forward to the next anchor point.
.. ipython:: python
- pd.Timestamp('2014-01-02') + MonthBegin(n=0)
- pd.Timestamp('2014-01-02') + MonthEnd(n=0)
+ pd.Timestamp('2014-01-02') + pd.offsets.MonthBegin(n=0)
+ pd.Timestamp('2014-01-02') + pd.offsets.MonthEnd(n=0)
- pd.Timestamp('2014-01-01') + MonthBegin(n=0)
- pd.Timestamp('2014-01-31') + MonthEnd(n=0)
+ pd.Timestamp('2014-01-01') + pd.offsets.MonthBegin(n=0)
+ pd.Timestamp('2014-01-31') + pd.offsets.MonthEnd(n=0)
.. _timeseries.holiday:
@@ -1427,10 +1429,10 @@ An example of how holidays and holiday calendars are defined:
USMemorialDay,
Holiday('July 4th', month=7, day=4, observance=nearest_workday),
Holiday('Columbus Day', month=10, day=1,
- offset=DateOffset(weekday=MO(2))), #same as 2*Week(weekday=2)
+ offset=pd.DateOffset(weekday=MO(2))), #same as 2*Week(weekday=2)
]
cal = ExampleCalendar()
- cal.holidays(datetime(2012, 1, 1), datetime(2012, 12, 31))
+ cal.holidays(datetime.datetime(2012, 1, 1), datetime.datetime(2012, 12, 31))
Using this calendar, creating an index or doing offset arithmetic skips weekends
and holidays (i.e., Memorial Day/July 4th). For example, the below defines
@@ -1444,10 +1446,10 @@ or ``Timestamp`` objects.
pd.DatetimeIndex(start='7/1/2012', end='7/10/2012',
freq=CDay(calendar=cal)).to_pydatetime()
offset = CustomBusinessDay(calendar=cal)
- datetime(2012, 5, 25) + offset
- datetime(2012, 7, 3) + offset
- datetime(2012, 7, 3) + 2 * offset
- datetime(2012, 7, 6) + offset
+ datetime.datetime(2012, 5, 25) + offset
+ datetime.datetime(2012, 7, 3) + offset
+ datetime.datetime(2012, 7, 3) + 2 * offset
+ datetime.datetime(2012, 7, 6) + offset
Ranges are defined by the ``start_date`` and ``end_date`` class attributes
of ``AbstractHolidayCalendar``. The defaults are shown below.
@@ -1462,8 +1464,8 @@ datetime/Timestamp/string.
.. ipython:: python
- AbstractHolidayCalendar.start_date = datetime(2012, 1, 1)
- AbstractHolidayCalendar.end_date = datetime(2012, 12, 31)
+ AbstractHolidayCalendar.start_date = datetime.datetime(2012, 1, 1)
+ AbstractHolidayCalendar.end_date = datetime.datetime(2012, 12, 31)
cal.holidays()
Every calendar class is accessible by name using the ``get_calendar`` function
@@ -1490,7 +1492,7 @@ Shifting / Lagging
~~~~~~~~~~~~~~~~~~
One may want to *shift* or *lag* the values in a time series back and forward in
-time. The method for this is :meth:`~Series.shift`, which is available on all of
+time. The method for this is :meth:`~Series.shift`, which is available on all of
the pandas objects.
.. ipython:: python
@@ -1500,16 +1502,16 @@ the pandas objects.
ts.shift(1)
The ``shift`` method accepts an ``freq`` argument which can accept a
-``DateOffset`` class or other ``timedelta``-like object or also an
+``DateOffset`` class or other ``timedelta``-like object or also an
:ref:`offset alias <timeseries.offset_aliases>`:
.. ipython:: python
- ts.shift(5, freq=offsets.BDay())
+ ts.shift(5, freq=pd.offsets.BDay())
ts.shift(5, freq='BM')
Rather than changing the alignment of the data and the index, ``DataFrame`` and
-``Series`` objects also have a :meth:`~Series.tshift` convenience method that
+``Series`` objects also have a :meth:`~Series.tshift` convenience method that
changes all the dates in the index by a specified number of offsets:
.. ipython:: python
@@ -1522,35 +1524,35 @@ is not being realigned.
Frequency Conversion
~~~~~~~~~~~~~~~~~~~~
-The primary function for changing frequencies is the :meth:`~Series.asfreq`
-method. For a ``DatetimeIndex``, this is basically just a thin, but convenient
-wrapper around :meth:`~Series.reindex` which generates a ``date_range`` and
+The primary function for changing frequencies is the :meth:`~Series.asfreq`
+method. For a ``DatetimeIndex``, this is basically just a thin, but convenient
+wrapper around :meth:`~Series.reindex` which generates a ``date_range`` and
calls ``reindex``.
.. ipython:: python
- dr = pd.date_range('1/1/2010', periods=3, freq=3 * offsets.BDay())
+ dr = pd.date_range('1/1/2010', periods=3, freq=3 * pd.offsets.BDay())
ts = pd.Series(randn(3), index=dr)
ts
- ts.asfreq(BDay())
+ ts.asfreq(pd.offsets.BDay())
``asfreq`` provides a further convenience so you can specify an interpolation
method for any gaps that may appear after the frequency conversion.
.. ipython:: python
- ts.asfreq(BDay(), method='pad')
+ ts.asfreq(pd.offsets.BDay(), method='pad')
Filling Forward / Backward
~~~~~~~~~~~~~~~~~~~~~~~~~~
-Related to ``asfreq`` and ``reindex`` is :meth:`~Series.fillna`, which is
+Related to ``asfreq`` and ``reindex`` is :meth:`~Series.fillna`, which is
documented in the :ref:`missing data section <missing_data.fillna>`.
Converting to Python Datetimes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-``DatetimeIndex`` can be converted to an array of Python native
+``DatetimeIndex`` can be converted to an array of Python native
:py:class:`datetime.datetime` objects using the ``to_pydatetime`` method.
.. _timeseries.resampling:
@@ -1563,13 +1565,13 @@ Resampling
The interface to ``.resample`` has changed in 0.18.0 to be more groupby-like and hence more flexible.
See the :ref:`whatsnew docs <whatsnew_0180.breaking.resample>` for a comparison with prior versions.
-Pandas has a simple, powerful, and efficient functionality for performing
-resampling operations during frequency conversion (e.g., converting secondly
-data into 5-minutely data). This is extremely common in, but not limited to,
+Pandas has a simple, powerful, and efficient functionality for performing
+resampling operations during frequency conversion (e.g., converting secondly
+data into 5-minutely data). This is extremely common in, but not limited to,
financial applications.
-:meth:`~Series.resample` is a time-based groupby, followed by a reduction method
-on each of its groups. See some :ref:`cookbook examples <cookbook.resample>` for
+:meth:`~Series.resample` is a time-based groupby, followed by a reduction method
+on each of its groups. See some :ref:`cookbook examples <cookbook.resample>` for
some advanced strategies.
Starting in version 0.18.1, the ``resample()`` function can be used directly from
@@ -1577,7 +1579,7 @@ Starting in version 0.18.1, the ``resample()`` function can be used directly fro
.. note::
- ``.resample()`` is similar to using a :meth:`~Series.rolling` operation with
+ ``.resample()`` is similar to using a :meth:`~Series.rolling` operation with
a time-based offset, see a discussion :ref:`here <stats.moments.ts-versus-resampling>`.
Basics
@@ -1632,8 +1634,8 @@ labels.
.. note::
- The default values for ``label`` and ``closed`` is 'left' for all
- frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W'
+ The default values for ``label`` and ``closed`` is 'left' for all
+ frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W'
which all have a default of 'right'.
.. ipython:: python
@@ -1680,9 +1682,9 @@ Sparse Resampling
~~~~~~~~~~~~~~~~~
Sparse timeseries are the ones where you have a lot fewer points relative
-to the amount of time you are looking to resample. Naively upsampling a sparse
-series can potentially generate lots of intermediate values. When you don't want
-to use a method to fill these values, e.g. ``fill_method`` is ``None``, then
+to the amount of time you are looking to resample. Naively upsampling a sparse
+series can potentially generate lots of intermediate values. When you don't want
+to use a method to fill these values, e.g. ``fill_method`` is ``None``, then
intermediate values will be filled with ``NaN``.
Since ``resample`` is a time-based groupby, the following is a method to efficiently
@@ -1845,13 +1847,13 @@ If ``Period`` freq is daily or higher (``D``, ``H``, ``T``, ``S``, ``L``, ``U``,
.. ipython:: python
p = pd.Period('2014-07-01 09:00', freq='H')
- p + Hour(2)
- p + timedelta(minutes=120)
+ p + pd.offsets.Hour(2)
+ p + datetime.timedelta(minutes=120)
p + np.timedelta64(7200, 's')
.. code-block:: ipython
- In [1]: p + Minute(5)
+ In [1]: p + pd.offsets.Minute(5)
Traceback
...
ValueError: Input has different freq from Period(freq=H)
@@ -1861,11 +1863,11 @@ If ``Period`` has other frequencies, only the same ``offsets`` can be added. Oth
.. ipython:: python
p = pd.Period('2014-07', freq='M')
- p + MonthEnd(3)
+ p + pd.offsets.MonthEnd(3)
.. code-block:: ipython
- In [1]: p + MonthBegin(3)
+ In [1]: p + pd.offsets.MonthBegin(3)
Traceback
...
ValueError: Input has different freq from Period(freq=M)
@@ -1923,11 +1925,11 @@ objects:
idx = pd.period_range('2014-07-01 09:00', periods=5, freq='H')
idx
- idx + Hour(2)
+ idx + pd.offsets.Hour(2)
idx = pd.period_range('2014-07', periods=5, freq='M')
idx
- idx + MonthEnd(3)
+ idx + pd.offsets.MonthEnd(3)
``PeriodIndex`` has its own dtype named ``period``, refer to :ref:`Period Dtypes <timeseries.period_dtype>`.
@@ -1977,7 +1979,7 @@ You can pass in dates and strings to ``Series`` and ``DataFrame`` with ``PeriodI
ps['2011-01']
- ps[datetime(2011, 12, 25):]
+ ps[datetime.datetime(2011, 12, 25):]
ps['10/31/2011':'12/31/2011']
@@ -2373,7 +2375,7 @@ can be controlled by the ``nonexistent`` argument. The following options are ava
.. ipython:: python
- dti = pd.date_range(start='2015-03-29 01:30:00', periods=3, freq='H')
+ dti = pd.date_range(start='2015-03-29 02:30:00', periods=3, freq='H')
# 2:30 is a nonexistent time
Localization of nonexistent times will raise an error by default.
| Updated `timeseries.rst` introduction to DateOffsets
* Explain relationship with `Timedelta`
* Clarify explanation of rollforward/back
* Bring all the offsets and their aliases to the front
Misc fixes:
* Period dtypes are preserved in Series & DataFrames
* Rendering for timezone nonexistent examples | https://api.github.com/repos/pandas-dev/pandas/pulls/23385 | 2018-10-27T22:52:27Z | 2018-12-01T23:37:34Z | 2018-12-01T23:37:34Z | 2018-12-02T02:09:02Z |
ENH: Add Styler.pipe() method (#23229) | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 81bb420c47a99..415a8dfa029c4 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -2482,6 +2482,7 @@ Style Application
Styler.set_properties
Styler.set_uuid
Styler.clear
+ Styler.pipe
Builtin Styles
~~~~~~~~~~~~~~
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index d0dddb19f4c93..1a7f96bf7ae41 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -184,6 +184,30 @@ array, but rather an ``ExtensionArray``:
This is the same behavior as ``Series.values`` for categorical data. See
:ref:`whatsnew_0240.api_breaking.interval_values` for more.
+
+.. _whatsnew_0240.enhancements.styler_pipe:
+
+New ``Styler.pipe()`` method
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+The :class:`~pandas.io.formats.style.Styler` class has gained a
+:meth:`~pandas.io.formats.style.Styler.pipe` method (:issue:`23229`). This provides a
+convenient way to apply users' predefined styling functions, and can help reduce
+"boilerplate" when using DataFrame styling functionality repeatedly within a notebook.
+
+.. ipython:: python
+
+ df = pandas.DataFrame({'N': [1250, 1500, 1750], 'X': [0.25, 0.35, 0.50]})
+
+ def format_and_align(styler):
+ return (styler.format({'N': '{:,}', 'X': '{:.1%}'})
+ .set_properties(**{'text-align': 'right'}))
+
+ df.style.pipe(format_and_align).set_caption('Summary of results.')
+
+Similar methods already exist for other classes in pandas, including :meth:`DataFrame.pipe`,
+:meth:`Groupby.pipe`, and :meth:`Resampler.pipe`.
+
+
.. _whatsnew_0240.enhancements.join_with_two_multiindexes:
Joining with two multi-indexes
@@ -225,6 +249,7 @@ For earlier versions this can be done using the following.
pd.merge(left.reset_index(), right.reset_index(),
on=['key'], how='inner').set_index(['key', 'X', 'Y'])
+
.. _whatsnew_0240.enhancements.rename_axis:
Renaming names in a MultiIndex
@@ -248,6 +273,7 @@ Example:
See the :ref:`advanced docs on renaming<advanced.index_names>` for more details.
+
.. _whatsnew_0240.enhancements.other:
Other Enhancements
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index dda50b6a0e7f8..8ee9ea5b3d980 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -1235,6 +1235,75 @@ class MyStyler(cls):
return MyStyler
+ def pipe(self, func, *args, **kwargs):
+ """
+ Apply ``func(self, *args, **kwargs)``, and return the result.
+
+ .. versionadded:: 0.24.0
+
+ Parameters
+ ----------
+ func : function
+ Function to apply to the Styler. Alternatively, a
+ ``(callable, keyword)`` tuple where ``keyword`` is a string
+ indicating the keyword of ``callable`` that expects the Styler.
+ *args, **kwargs :
+ Arguments passed to `func`.
+
+ Returns
+ -------
+ object :
+ The value returned by ``func``.
+
+ See Also
+ --------
+ DataFrame.pipe : Analogous method for DataFrame.
+ Styler.apply : Apply a function row-wise, column-wise, or table-wise to
+ modify the dataframe's styling.
+
+ Notes
+ -----
+ Like :meth:`DataFrame.pipe`, this method can simplify the
+ application of several user-defined functions to a styler. Instead
+ of writing:
+
+ .. code-block:: python
+
+ f(g(df.style.set_precision(3), arg1=a), arg2=b, arg3=c)
+
+ users can write:
+
+ .. code-block:: python
+
+ (df.style.set_precision(3)
+ .pipe(g, arg1=a)
+ .pipe(f, arg2=b, arg3=c))
+
+ In particular, this allows users to define functions that take a
+ styler object, along with other parameters, and return the styler after
+ making styling changes (such as calling :meth:`Styler.apply` or
+ :meth:`Styler.set_properties`). Using ``.pipe``, these user-defined
+ style "transformations" can be interleaved with calls to the built-in
+ Styler interface.
+
+ Examples
+ --------
+ >>> def format_conversion(styler):
+ ... return (styler.set_properties(**{'text-align': 'right'})
+ ... .format({'conversion': '{:.1%}'}))
+
+ The user-defined ``format_conversion`` function above can be called
+ within a sequence of other style modifications:
+
+ >>> df = pd.DataFrame({'trial': list(range(5)),
+ ... 'conversion': [0.75, 0.85, np.nan, 0.7, 0.72]})
+ >>> (df.style
+ ... .highlight_min(subset=['conversion'], color='yellow')
+ ... .pipe(format_conversion)
+ ... .set_caption("Results with minimum conversion highlighted."))
+ """
+ return com._pipe(self, func, *args, **kwargs)
+
def _is_visible(idx_row, idx_col, lengths):
"""
diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py
index 6027fc08624df..fa8bd91dce939 100644
--- a/pandas/tests/io/formats/test_style.py
+++ b/pandas/tests/io/formats/test_style.py
@@ -1173,6 +1173,22 @@ def test_hide_columns_mult_levels(self):
assert ctx['body'][1][2]['is_visible']
assert ctx['body'][1][2]['display_value'] == 3
+ def test_pipe(self):
+ def set_caption_from_template(styler, a, b):
+ return styler.set_caption(
+ 'Dataframe with a = {a} and b = {b}'.format(a=a, b=b))
+
+ styler = self.df.style.pipe(set_caption_from_template, 'A', b='B')
+ assert 'Dataframe with a = A and b = B' in styler.render()
+
+ # Test with an argument that is a (callable, keyword_name) pair.
+ def f(a, b, styler):
+ return (a, b, styler)
+
+ styler = self.df.style
+ result = styler.pipe((f, 'styler'), a=1, b=2)
+ assert result == (1, 2, styler)
+
@td.skip_if_no_mpl
class TestStylerMatplotlibDep(object):
| Added `Styler.pipe()` method. This allows users to easily apply and compose functions that operate on Styler objects, just like the `DataFrame.pipe()` method does for dataframes.
- [x] closes #23229
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/23384 | 2018-10-27T20:52:18Z | 2018-11-28T14:48:33Z | 2018-11-28T14:48:33Z | 2018-11-28T14:48:33Z |
Integrate flake8_rst into ./ci/code_check.sh | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 330901ba56fbd..c4b483a794c21 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -44,6 +44,13 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then
flake8 pandas/_libs --filename=*.pxi.in,*.pxd --select=E501,E302,E203,E111,E114,E221,E303,E231,E126,F403
RET=$(($RET + $?)) ; echo $MSG "DONE"
+ echo "flake8-rst --version"
+ flake8-rst --version
+
+ MSG='Linting code-blocks in .rst documentation' ; echo $MSG
+ flake8-rst doc/source --filename=*.rst
+ RET=$(($RET + $?)) ; echo $MSG "DONE"
+
# Check that cython casting is of the form `<type>obj` as opposed to `<type> obj`;
# it doesn't make a difference, but we want to be internally consistent.
# Note: this grep pattern is (intended to be) equivalent to the python
@@ -64,6 +71,9 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then
cpplint --quiet --extensions=c,h --headers=h --recursive --filter=-readability/casting,-runtime/int,-build/include_subdir pandas/_libs/src/*.h pandas/_libs/src/parser pandas/_libs/ujson pandas/_libs/tslibs/src/datetime
RET=$(($RET + $?)) ; echo $MSG "DONE"
+ echo "isort --version-number"
+ isort --version-number
+
# Imports - Check formatting using isort see setup.cfg for settings
MSG='Check import format using isort ' ; echo $MSG
isort --recursive --check-only pandas
diff --git a/ci/deps/travis-36.yaml b/ci/deps/travis-36.yaml
index 352717a842214..8aa551f6194d9 100644
--- a/ci/deps/travis-36.yaml
+++ b/ci/deps/travis-36.yaml
@@ -9,6 +9,7 @@ dependencies:
- fastparquet
- flake8>=3.5
- flake8-comprehensions
+ - flake8-rst
- gcsfs
- geopandas
- html5lib
diff --git a/ci/environment-dev.yaml b/ci/environment-dev.yaml
index 3e69b1f725b24..2718c1cd582b6 100644
--- a/ci/environment-dev.yaml
+++ b/ci/environment-dev.yaml
@@ -7,6 +7,7 @@ dependencies:
- NumPy
- flake8
- flake8-comprehensions
+ - flake8-rst
- hypothesis>=3.58.0
- isort
- moto
diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt
index 6a8b8d64d943b..a1cb20c265974 100644
--- a/ci/requirements_dev.txt
+++ b/ci/requirements_dev.txt
@@ -4,6 +4,7 @@ Cython>=0.28.2
NumPy
flake8
flake8-comprehensions
+flake8-rst
hypothesis>=3.58.0
isort
moto
diff --git a/doc/source/10min.rst b/doc/source/10min.rst
index fbbe94a72c71e..b5938a24ce6c5 100644
--- a/doc/source/10min.rst
+++ b/doc/source/10min.rst
@@ -45,7 +45,7 @@ a default integer index:
.. ipython:: python
- s = pd.Series([1,3,5,np.nan,6,8])
+ s = pd.Series([1, 3, 5, np.nan, 6, 8])
s
Creating a :class:`DataFrame` by passing a NumPy array, with a datetime index
@@ -62,12 +62,12 @@ Creating a ``DataFrame`` by passing a dict of objects that can be converted to s
.. ipython:: python
- df2 = pd.DataFrame({ 'A' : 1.,
- 'B' : pd.Timestamp('20130102'),
- 'C' : pd.Series(1,index=list(range(4)),dtype='float32'),
- 'D' : np.array([3] * 4,dtype='int32'),
- 'E' : pd.Categorical(["test","train","test","train"]),
- 'F' : 'foo' })
+ df2 = pd.DataFrame({'A': 1.,
+ 'B': pd.Timestamp('20130102'),
+ 'C': pd.Series(1, index=list(range(4)),dtype='float32'),
+ 'D': np.array([3] * 4, dtype='int32'),
+ 'E': pd.Categorical(["test", "train", "test", "train"]),
+ 'F': 'foo'})
df2
The columns of the resulting ``DataFrame`` have different
@@ -283,9 +283,9 @@ Using the :func:`~Series.isin` method for filtering:
.. ipython:: python
df2 = df.copy()
- df2['E'] = ['one', 'one','two','three','four','three']
+ df2['E'] = ['one', 'one', 'two', 'three', 'four', 'three']
df2
- df2[df2['E'].isin(['two','four'])]
+ df2[df2['E'].isin(['two', 'four'])]
Setting
~~~~~~~
@@ -295,7 +295,7 @@ by the indexes.
.. ipython:: python
- s1 = pd.Series([1,2,3,4,5,6], index=pd.date_range('20130102', periods=6))
+ s1 = pd.Series([1, 2, 3, 4, 5, 6], index=pd.date_range('20130102', periods=6))
s1
df['F'] = s1
@@ -394,7 +394,7 @@ In addition, pandas automatically broadcasts along the specified dimension.
.. ipython:: python
- s = pd.Series([1,3,5,np.nan,6,8], index=dates).shift(2)
+ s = pd.Series([1, 3, 5, np.nan, 6, 8], index=dates).shift(2)
s
df.sub(s, axis='index')
@@ -492,7 +492,7 @@ section.
.. ipython:: python
- df = pd.DataFrame(np.random.randn(8, 4), columns=['A','B','C','D'])
+ df = pd.DataFrame(np.random.randn(8, 4), columns=['A', 'B', 'C', 'D'])
df
s = df.iloc[3]
df.append(s, ignore_index=True)
@@ -512,12 +512,12 @@ See the :ref:`Grouping section <groupby>`.
.. ipython:: python
- df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
- 'foo', 'bar', 'foo', 'foo'],
- 'B' : ['one', 'one', 'two', 'three',
- 'two', 'two', 'one', 'three'],
- 'C' : np.random.randn(8),
- 'D' : np.random.randn(8)})
+ df = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
+ 'foo', 'bar', 'foo', 'foo'],
+ 'B': ['one', 'one', 'two', 'three',
+ 'two', 'two', 'one', 'three'],
+ 'C': np.random.randn(8),
+ 'D': np.random.randn(8)})
df
Grouping and then applying the :meth:`~DataFrame.sum` function to the resulting
@@ -532,7 +532,7 @@ apply the ``sum`` function.
.. ipython:: python
- df.groupby(['A','B']).sum()
+ df.groupby(['A', 'B']).sum()
Reshaping
---------
@@ -578,11 +578,11 @@ See the section on :ref:`Pivot Tables <reshaping.pivot>`.
.. ipython:: python
- df = pd.DataFrame({'A' : ['one', 'one', 'two', 'three'] * 3,
- 'B' : ['A', 'B', 'C'] * 4,
- 'C' : ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2,
- 'D' : np.random.randn(12),
- 'E' : np.random.randn(12)})
+ df = pd.DataFrame({'A': ['one', 'one', 'two', 'three'] * 3,
+ 'B': ['A', 'B', 'C'] * 4,
+ 'C': ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2,
+ 'D': np.random.randn(12),
+ 'E': np.random.randn(12)})
df
We can produce pivot tables from this data very easily:
@@ -653,7 +653,7 @@ pandas can include categorical data in a ``DataFrame``. For full docs, see the
.. ipython:: python
- df = pd.DataFrame({"id":[1,2,3,4,5,6], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']})
+ df = pd.DataFrame({"id":[1, 2, 3, 4, 5, 6], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']})
Convert the raw grades to a categorical data type.
@@ -753,13 +753,13 @@ Writing to a HDF5 Store.
.. ipython:: python
- df.to_hdf('foo.h5','df')
+ df.to_hdf('foo.h5', 'df')
Reading from a HDF5 Store.
.. ipython:: python
- pd.read_hdf('foo.h5','df')
+ pd.read_hdf('foo.h5', 'df')
.. ipython:: python
:suppress:
@@ -796,7 +796,7 @@ If you are attempting to perform an operation you might see an exception like:
.. code-block:: python
>>> if pd.Series([False, True, False]):
- print("I was true")
+ ... print("I was true")
Traceback
...
ValueError: The truth value of an array is ambiguous. Use a.empty, a.any() or a.all().
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst
index 608e2c8e72ded..24c117a534209 100644
--- a/doc/source/advanced.rst
+++ b/doc/source/advanced.rst
@@ -318,13 +318,13 @@ As usual, **both sides** of the slicers are included as this is label indexing.
.. code-block:: python
- df.loc[(slice('A1','A3'),.....), :]
+ df.loc[(slice('A1', 'A3'), ...), :] # noqa: E999
You should **not** do this:
.. code-block:: python
- df.loc[(slice('A1','A3'),.....)]
+ df.loc[(slice('A1', 'A3'), ...)] # noqa: E999
.. ipython:: python
@@ -532,7 +532,7 @@ used to move the values from the ``MultiIndex`` to a column.
.. ipython:: python
df.rename_axis(index=['abc', 'def'])
-
+
Note that the columns of a ``DataFrame`` are an index, so that using
``rename_axis`` with the ``columns`` argument will change the name of that
index.
@@ -779,7 +779,7 @@ values **not** in the categories, similarly to how you can reindex **any** panda
Reshaping and Comparison operations on a ``CategoricalIndex`` must have the same categories
or a ``TypeError`` will be raised.
- .. code-block:: python
+ .. code-block:: ipython
In [9]: df3 = pd.DataFrame({'A' : np.arange(6),
'B' : pd.Series(list('aabbca')).astype('category')})
@@ -1071,7 +1071,7 @@ On the other hand, if the index is not monotonic, then both slice bounds must be
# OK because 2 and 4 are in the index
df.loc[2:4, :]
-.. code-block:: python
+.. code-block:: ipython
# 0 is not in the index
In [9]: df.loc[0:4, :]
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 81efbfd6d1403..d19fcedf4e766 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -306,8 +306,8 @@ To evaluate single-element pandas objects in a boolean context, use the method
.. code-block:: python
- >>> if df:
- ...
+ >>> if df: # noqa: E999
+ ...
Or
@@ -317,7 +317,7 @@ To evaluate single-element pandas objects in a boolean context, use the method
These will both raise errors, as you are trying to compare multiple values.
- .. code-block:: python
+ .. code-block:: python-traceback
ValueError: The truth value of an array is ambiguous. Use a.empty, a.any() or a.all().
@@ -732,9 +732,8 @@ with the equivalent
.. code-block:: python
>>> (df.pipe(h)
- .pipe(g, arg1=1)
- .pipe(f, arg2=2, arg3=3)
- )
+ ... .pipe(g, arg1=1)
+ ... .pipe(f, arg2=2, arg3=3))
Pandas encourages the second style, which is known as method chaining.
``pipe`` makes it easy to use your own or another library's functions
diff --git a/doc/source/comparison_with_sas.rst b/doc/source/comparison_with_sas.rst
index 4d7acdf9ab16c..318bffe44a81b 100644
--- a/doc/source/comparison_with_sas.rst
+++ b/doc/source/comparison_with_sas.rst
@@ -744,7 +744,7 @@ XPORT is a relatively limited format and the parsing of it is not as
optimized as some of the other pandas readers. An alternative way
to interop data between SAS and pandas is to serialize to csv.
-.. code-block:: python
+.. code-block:: ipython
# version 0.17, 10M rows
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 3ec505998fde0..084f710091a1b 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -792,7 +792,7 @@ Transitioning to ``pytest``
.. code-block:: python
class TestReallyCoolFeature(object):
- ....
+ pass
Going forward, we are moving to a more *functional* style using the `pytest <http://docs.pytest.org/en/latest/>`__ framework, which offers a richer testing
framework that will facilitate testing and developing. Thus, instead of writing test classes, we will write test functions like this:
@@ -800,7 +800,7 @@ framework that will facilitate testing and developing. Thus, instead of writing
.. code-block:: python
def test_really_cool_feature():
- ....
+ pass
Using ``pytest``
~~~~~~~~~~~~~~~~
@@ -825,25 +825,30 @@ We would name this file ``test_cool_feature.py`` and put in an appropriate place
import pandas as pd
from pandas.util import testing as tm
+
@pytest.mark.parametrize('dtype', ['int8', 'int16', 'int32', 'int64'])
def test_dtypes(dtype):
assert str(np.dtype(dtype)) == dtype
- @pytest.mark.parametrize('dtype', ['float32',
- pytest.param('int16', marks=pytest.mark.skip),
- pytest.param('int32',
- marks=pytest.mark.xfail(reason='to show how it works'))])
+
+ @pytest.mark.parametrize(
+ 'dtype', ['float32', pytest.param('int16', marks=pytest.mark.skip),
+ pytest.param('int32', marks=pytest.mark.xfail(
+ reason='to show how it works'))])
def test_mark(dtype):
assert str(np.dtype(dtype)) == 'float32'
+
@pytest.fixture
def series():
return pd.Series([1, 2, 3])
+
@pytest.fixture(params=['int8', 'int16', 'int32', 'int64'])
def dtype(request):
return request.param
+
def test_series(series, dtype):
result = series.astype(dtype)
assert result.dtype == dtype
@@ -912,6 +917,7 @@ for details <https://hypothesis.readthedocs.io/en/latest/index.html>`_.
st.lists(any_json_value), st.dictionaries(st.text(), any_json_value)
))
+
@given(value=any_json_value)
def test_json_roundtrip(value):
result = json.loads(json.dumps(value))
diff --git a/doc/source/contributing_docstring.rst b/doc/source/contributing_docstring.rst
index 38e4baa66ef67..2f8ffc2e07c71 100644
--- a/doc/source/contributing_docstring.rst
+++ b/doc/source/contributing_docstring.rst
@@ -197,6 +197,8 @@ infinitive verb.
"""
pass
+.. code-block:: python
+
def astype(dtype):
"""
Method to cast Series type.
@@ -205,6 +207,8 @@ infinitive verb.
"""
pass
+.. code-block:: python
+
def astype(dtype):
"""
Cast Series type
@@ -213,6 +217,8 @@ infinitive verb.
"""
pass
+.. code-block:: python
+
def astype(dtype):
"""
Cast Series type from its current type to the new type defined in
@@ -624,6 +630,7 @@ A simple example could be:
.. code-block:: python
class Series:
+
def head(self, n=5):
"""
Return the first elements of the Series.
@@ -681,12 +688,11 @@ shown:
.. code-block:: python
- import numpy as np
- import pandas as pd
-
+ import numpy as np # noqa: F401
+ import pandas as pd # noqa: F401
Any other module used in the examples must be explicitly imported, one per line (as
-recommended in `PEP-8 <https://www.python.org/dev/peps/pep-0008/#imports>`_)
+recommended in :pep:`8#imports`)
and avoiding aliases. Avoid excessive imports, but if needed, imports from
the standard library go first, followed by third-party libraries (like
matplotlib).
@@ -720,6 +726,7 @@ positional arguments ``head(3)``.
.. code-block:: python
class Series:
+
def mean(self):
"""
Compute the mean of the input.
@@ -946,12 +953,14 @@ substitute the children's class names in this docstring.
"""Apply my function to %(klass)s."""
...
+
class ChildA(Parent):
@Substitution(klass="ChildA")
@Appender(Parent.my_function.__doc__)
def my_function(self):
...
+
class ChildB(Parent):
@Substitution(klass="ChildB")
@Appender(Parent.my_function.__doc__)
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 3d26a9c7d3d54..53468e755a722 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -968,7 +968,7 @@ Parsing date components in multi-columns
Parsing date components in multi-columns is faster with a format
-.. code-block:: python
+.. code-block:: ipython
In [30]: i = pd.date_range('20000101',periods=10000)
@@ -1266,6 +1266,7 @@ The `method` argument within `DataFrame.corr` can accept a callable in addition
...
... return cov_ab / std_a / std_b
...
+ ...
>>> df = pd.DataFrame(np.random.normal(size=(100, 3)))
...
>>> df.corr(method=distcorr)
diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst
index d02912294060c..b55f93566c03d 100644
--- a/doc/source/dsintro.rst
+++ b/doc/source/dsintro.rst
@@ -566,13 +566,12 @@ To write code compatible with all versions of Python, split the assignment in tw
.. code-block:: python
>>> dependent = pd.DataFrame({"A": [1, 1, 1]})
- >>> dependent.assign(A=lambda x: x["A"] + 1,
- B=lambda x: x["A"] + 2)
+ >>> dependent.assign(A=lambda x: x["A"] + 1, B=lambda x: x["A"] + 2)
For Python 3.5 and earlier the expression creating ``B`` refers to the
"old" value of ``A``, ``[1, 1, 1]``. The output is then
- .. code-block:: python
+ .. code-block:: console
A B
0 2 3
@@ -582,7 +581,7 @@ To write code compatible with all versions of Python, split the assignment in tw
For Python 3.6 and later, the expression creating ``A`` refers to the
"new" value of ``A``, ``[2, 2, 2]``, which results in
- .. code-block:: python
+ .. code-block:: console
A B
0 2 4
diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst
index 8f8a9fe3e50e0..2ca8a2b7ac0f8 100644
--- a/doc/source/enhancingperf.rst
+++ b/doc/source/enhancingperf.rst
@@ -298,7 +298,7 @@ advanced Cython techniques:
Even faster, with the caveat that a bug in our Cython code (an off-by-one error,
for example) might cause a segfault because memory access isn't checked.
-For more about ``boundscheck`` and ``wraparound``, see the Cython docs on
+For more about ``boundscheck`` and ``wraparound``, see the Cython docs on
`compiler directives <http://cython.readthedocs.io/en/latest/src/reference/compilation.html?highlight=wraparound#compiler-directives>`__.
.. _enhancingperf.numba:
@@ -323,39 +323,45 @@ Numba works by generating optimized machine code using the LLVM compiler infrast
Jit
~~~
-We demonstrate how to use Numba to just-in-time compile our code. We simply
+We demonstrate how to use Numba to just-in-time compile our code. We simply
take the plain Python code from above and annotate with the ``@jit`` decorator.
.. code-block:: python
import numba
+
@numba.jit
def f_plain(x):
- return x * (x - 1)
+ return x * (x - 1)
+
@numba.jit
def integrate_f_numba(a, b, N):
- s = 0
- dx = (b - a) / N
- for i in range(N):
- s += f_plain(a + i * dx)
- return s * dx
+ s = 0
+ dx = (b - a) / N
+ for i in range(N):
+ s += f_plain(a + i * dx)
+ return s * dx
+
@numba.jit
def apply_integrate_f_numba(col_a, col_b, col_N):
- n = len(col_N)
- result = np.empty(n, dtype='float64')
- assert len(col_a) == len(col_b) == n
- for i in range(n):
- result[i] = integrate_f_numba(col_a[i], col_b[i], col_N[i])
- return result
+ n = len(col_N)
+ result = np.empty(n, dtype='float64')
+ assert len(col_a) == len(col_b) == n
+ for i in range(n):
+ result[i] = integrate_f_numba(col_a[i], col_b[i], col_N[i])
+ return result
+
def compute_numba(df):
- result = apply_integrate_f_numba(df['a'].values, df['b'].values, df['N'].values)
- return pd.Series(result, index=df.index, name='result')
+ result = apply_integrate_f_numba(df['a'].values, df['b'].values,
+ df['N'].values)
+ return pd.Series(result, index=df.index, name='result')
-Note that we directly pass NumPy arrays to the Numba function. ``compute_numba`` is just a wrapper that provides a nicer interface by passing/returning pandas objects.
+Note that we directly pass NumPy arrays to the Numba function. ``compute_numba`` is just a wrapper that provides a
+nicer interface by passing/returning pandas objects.
.. code-block:: ipython
@@ -375,13 +381,16 @@ Consider the following toy example of doubling each observation:
import numba
+
def double_every_value_nonumba(x):
- return x*2
+ return x * 2
+
@numba.vectorize
def double_every_value_withnumba(x):
- return x*2
+ return x * 2
+.. code-block:: ipython
# Custom function without numba
In [5]: %timeit df['col1_doubled'] = df.a.apply(double_every_value_nonumba)
@@ -402,18 +411,18 @@ Caveats
Numba will execute on any function, but can only accelerate certain classes of functions.
-Numba is best at accelerating functions that apply numerical functions to NumPy
-arrays. When passed a function that only uses operations it knows how to
+Numba is best at accelerating functions that apply numerical functions to NumPy
+arrays. When passed a function that only uses operations it knows how to
accelerate, it will execute in ``nopython`` mode.
-If Numba is passed a function that includes something it doesn't know how to
-work with -- a category that currently includes sets, lists, dictionaries, or
-string functions -- it will revert to ``object mode``. In ``object mode``,
-Numba will execute but your code will not speed up significantly. If you would
-prefer that Numba throw an error if it cannot compile a function in a way that
-speeds up your code, pass Numba the argument
-``nopython=True`` (e.g. ``@numba.jit(nopython=True)``). For more on
-troubleshooting Numba modes, see the `Numba troubleshooting page
+If Numba is passed a function that includes something it doesn't know how to
+work with -- a category that currently includes sets, lists, dictionaries, or
+string functions -- it will revert to ``object mode``. In ``object mode``,
+Numba will execute but your code will not speed up significantly. If you would
+prefer that Numba throw an error if it cannot compile a function in a way that
+speeds up your code, pass Numba the argument
+``nopython=True`` (e.g. ``@numba.jit(nopython=True)``). For more on
+troubleshooting Numba modes, see the `Numba troubleshooting page
<http://numba.pydata.org/numba-doc/latest/user/troubleshoot.html#the-compiled-code-is-too-slow>`__.
Read more in the `Numba docs <http://numba.pydata.org/>`__.
diff --git a/doc/source/extending.rst b/doc/source/extending.rst
index 1e8a8e50dd9e3..6c47d0ae8bd84 100644
--- a/doc/source/extending.rst
+++ b/doc/source/extending.rst
@@ -163,6 +163,7 @@ your ``MyExtensionArray`` class, as follows:
class MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin):
pass
+
MyExtensionArray._add_arithmetic_ops()
MyExtensionArray._add_comparison_ops()
@@ -205,6 +206,7 @@ To use a test, subclass it:
from pandas.tests.extension import base
+
class TestConstructors(base.BaseConstructorsTests):
pass
@@ -277,6 +279,7 @@ Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame
def _constructor_expanddim(self):
return SubclassedDataFrame
+
class SubclassedDataFrame(DataFrame):
@property
@@ -297,7 +300,7 @@ Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame
>>> type(to_framed)
<class '__main__.SubclassedDataFrame'>
- >>> df = SubclassedDataFrame({'A', [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9]})
+ >>> df = SubclassedDataFrame({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9]})
>>> df
A B C
0 1 4 7
@@ -313,6 +316,7 @@ Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame
0 1 4
1 2 5
2 3 6
+
>>> type(sliced1)
<class '__main__.SubclassedDataFrame'>
@@ -322,6 +326,7 @@ Below example shows how to define ``SubclassedSeries`` and ``SubclassedDataFrame
1 2
2 3
Name: A, dtype: int64
+
>>> type(sliced2)
<class '__main__.SubclassedSeries'>
diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst
index 79e312ca12833..0eb2a4eed8581 100644
--- a/doc/source/gotchas.rst
+++ b/doc/source/gotchas.rst
@@ -98,7 +98,7 @@ of the following code should be:
.. code-block:: python
- >>> if pd.Series([False, True, False]):
+ >>> if pd.Series([False, True, False]): # noqa: E999
...
Should it be ``True`` because it's not zero-length, or ``False`` because there
@@ -107,7 +107,7 @@ are ``False`` values? It is unclear, so instead, pandas raises a ``ValueError``:
.. code-block:: python
>>> if pd.Series([False, True, False]):
- print("I was true")
+ ... print("I was true")
Traceback
...
ValueError: The truth value of an array is ambiguous. Use a.empty, a.any() or a.all().
@@ -119,8 +119,8 @@ Alternatively, you might want to compare if the pandas object is ``None``:
.. code-block:: python
>>> if pd.Series([False, True, False]) is not None:
- print("I was not None")
- >>> I was not None
+ ... print("I was not None")
+ I was not None
Below is how to check if any of the values are ``True``:
@@ -128,8 +128,8 @@ Below is how to check if any of the values are ``True``:
.. code-block:: python
>>> if pd.Series([False, True, False]).any():
- print("I am any")
- >>> I am any
+ ... print("I am any")
+ I am any
To evaluate single-element pandas objects in a boolean context, use the method
:meth:`~DataFrame.bool`:
@@ -316,7 +316,7 @@ Occasionally you may have to deal with data that were created on a machine with
a different byte order than the one on which you are running Python. A common
symptom of this issue is an error like:
-.. code-block:: python
+.. code-block:: python-traceback
Traceback
...
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index 755edba352f05..fb96afaf7d796 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -79,7 +79,7 @@ pandas objects can be split on any of their axes. The abstract definition of
grouping is to provide a mapping of labels to group names. To create a GroupBy
object (more on what the GroupBy object is later), you may do the following:
-.. code-block:: ipython
+.. code-block:: python
# default is axis=0
>>> grouped = obj.groupby(key)
@@ -1310,7 +1310,7 @@ arbitrary function, for example:
.. code-block:: python
- (df.groupby(['Store', 'Product']).pipe(report_func)
+ df.groupby(['Store', 'Product']).pipe(report_func)
where ``report_func`` takes a GroupBy object and creates a report
from that.
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 1c63acce6e3fa..5740ab5fa6921 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -537,10 +537,10 @@ A list of indexers where any element is out of bounds will raise an
.. code-block:: python
- dfl.iloc[[4, 5, 6]]
+ >>> dfl.iloc[[4, 5, 6]]
IndexError: positional indexers are out-of-bounds
- dfl.iloc[:, 4]
+ >>> dfl.iloc[:, 4]
IndexError: single positional indexer is out-of-bounds
.. _indexing.callable:
@@ -1794,7 +1794,7 @@ interpreter executes this code:
.. code-block:: python
- dfmi.loc[:,('one','second')] = value
+ dfmi.loc[:, ('one', 'second')] = value
# becomes
dfmi.loc.__setitem__((slice(None), ('one', 'second')), value)
@@ -1827,10 +1827,10 @@ that you've done this:
.. code-block:: python
def do_something(df):
- foo = df[['bar', 'baz']] # Is foo a view? A copy? Nobody knows!
- # ... many lines here ...
- foo['quux'] = value # We don't know whether this will modify df or not!
- return foo
+ foo = df[['bar', 'baz']] # Is foo a view? A copy? Nobody knows!
+ # ... many lines here ...
+ foo['quux'] = value # We don't know whether this will modify df or not!
+ return foo
Yikes!
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 9f458b58717d6..0acb0dfbee2d7 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -1834,8 +1834,7 @@ For example:
.. code-block:: python
- DataFrame([1.0, 2.0, complex(1.0, 2.0)]).to_json() # raises
-
+ >>> DataFrame([1.0, 2.0, complex(1.0, 2.0)]).to_json() # raises
RuntimeError: Unhandled numpy dtype 15
can be dealt with by specifying a simple ``default_handler``:
@@ -2411,8 +2410,8 @@ columns to strings.
.. code-block:: python
url_mcc = 'https://en.wikipedia.org/wiki/Mobile_country_code'
- dfs = pd.read_html(url_mcc, match='Telekom Albania', header=0, converters={'MNC':
- str})
+ dfs = pd.read_html(url_mcc, match='Telekom Albania', header=0,
+ converters={'MNC': str})
.. versionadded:: 0.19
@@ -2724,7 +2723,8 @@ different parameters:
data = {}
# For when Sheet1's format differs from Sheet2
with pd.ExcelFile('path_to_file.xls') as xls:
- data['Sheet1'] = pd.read_excel(xls, 'Sheet1', index_col=None, na_values=['NA'])
+ data['Sheet1'] = pd.read_excel(xls, 'Sheet1', index_col=None,
+ na_values=['NA'])
data['Sheet2'] = pd.read_excel(xls, 'Sheet2', index_col=1)
Note that if the same parsing parameters are used for all sheets, a list
@@ -2735,11 +2735,14 @@ of sheet names can simply be passed to ``read_excel`` with no loss in performanc
# using the ExcelFile class
data = {}
with pd.ExcelFile('path_to_file.xls') as xls:
- data['Sheet1'] = read_excel(xls, 'Sheet1', index_col=None, na_values=['NA'])
- data['Sheet2'] = read_excel(xls, 'Sheet2', index_col=None, na_values=['NA'])
+ data['Sheet1'] = read_excel(xls, 'Sheet1', index_col=None,
+ na_values=['NA'])
+ data['Sheet2'] = read_excel(xls, 'Sheet2', index_col=None,
+ na_values=['NA'])
# equivalent using the read_excel function
- data = read_excel('path_to_file.xls', ['Sheet1', 'Sheet2'], index_col=None, na_values=['NA'])
+ data = read_excel('path_to_file.xls', ['Sheet1', 'Sheet2'],
+ index_col=None, na_values=['NA'])
.. _io.excel.specifying_sheets:
@@ -2899,7 +2902,10 @@ missing data to recover integer dtype:
.. code-block:: python
- cfun = lambda x: int(x) if x else -1
+ def cfun(x):
+ return int(x) if x else -1
+
+
read_excel('path_to_file.xls', 'Sheet1', converters={'MyInts': cfun})
dtype Specifications
@@ -3040,7 +3046,7 @@ argument to ``to_excel`` and to ``ExcelWriter``. The built-in engines are:
writer = ExcelWriter('path_to_file.xlsx', engine='xlsxwriter')
# Or via pandas configuration.
- from pandas import options
+ from pandas import options # noqa: E402
options.io.excel.xlsx.writer = 'xlsxwriter'
df.to_excel('path_to_file.xlsx', sheet_name='Sheet1')
@@ -3067,7 +3073,7 @@ which takes the contents of the clipboard buffer and passes them to the
``read_csv`` method. For instance, you can copy the following text to the
clipboard (CTRL-C on many operating systems):
-.. code-block:: python
+.. code-block:: console
A B C
x 1 4 p
@@ -3476,9 +3482,8 @@ This format is specified by default when using ``put`` or ``to_hdf`` or by ``for
.. code-block:: python
- pd.DataFrame(randn(10, 2)).to_hdf('test_fixed.h5', 'df')
-
- pd.read_hdf('test_fixed.h5', 'df', where='index>5')
+ >>> pd.DataFrame(randn(10, 2)).to_hdf('test_fixed.h5', 'df')
+ >>> pd.read_hdf('test_fixed.h5', 'df', where='index>5')
TypeError: cannot pass a where specification when reading a fixed format.
this store must be selected in its entirety
@@ -3574,7 +3579,7 @@ will yield a tuple for each group key along with the relative keys of its conten
Hierarchical keys cannot be retrieved as dotted (attribute) access as described above for items stored under the root node.
- .. code-block:: python
+ .. code-block:: ipython
In [8]: store.foo.bar.bah
AttributeError: 'HDFStore' object has no attribute 'foo'
@@ -3732,10 +3737,10 @@ The right-hand side of the sub-expression (after a comparison operator) can be:
instead of this
- .. code-block:: python
+ .. code-block:: ipython
string = "HolyMoly'"
- store.select('df', 'index == %s' % string)
+ store.select('df', 'index == %s' % string)
The latter will **not** work and will raise a ``SyntaxError``.Note that
there's a single quote followed by a double quote in the ``string``
@@ -3941,7 +3946,7 @@ The default is 50,000 rows returned in a chunk.
.. code-block:: python
- for df in pd.read_hdf('store.h5','df', chunksize=3):
+ for df in pd.read_hdf('store.h5', 'df', chunksize=3):
print(df)
Note, that the chunksize keyword applies to the **source** rows. So if you
@@ -4871,7 +4876,8 @@ to pass to :func:`pandas.to_datetime`:
.. code-block:: python
pd.read_sql_table('data', engine, parse_dates={'Date': '%Y-%m-%d'})
- pd.read_sql_table('data', engine, parse_dates={'Date': {'format': '%Y-%m-%d %H:%M:%S'}})
+ pd.read_sql_table('data', engine,
+ parse_dates={'Date': {'format': '%Y-%m-%d %H:%M:%S'}})
You can check if a table exists using :func:`~pandas.io.sql.has_table`
@@ -5374,11 +5380,11 @@ And here's the code:
import pandas as pd
import sqlite3
from numpy.random import randn
- from pandas.io import sql
sz = 1000000
df = pd.DataFrame({'A': randn(sz), 'B': [1] * sz})
+
def test_sql_write(df):
if os.path.exists('test.sql'):
os.remove('test.sql')
@@ -5386,55 +5392,73 @@ And here's the code:
df.to_sql(name='test_table', con=sql_db)
sql_db.close()
+
def test_sql_read():
sql_db = sqlite3.connect('test.sql')
pd.read_sql_query("select * from test_table", sql_db)
sql_db.close()
+
def test_hdf_fixed_write(df):
df.to_hdf('test_fixed.hdf', 'test', mode='w')
+
def test_hdf_fixed_read():
pd.read_hdf('test_fixed.hdf', 'test')
+
def test_hdf_fixed_write_compress(df):
df.to_hdf('test_fixed_compress.hdf', 'test', mode='w', complib='blosc')
+
def test_hdf_fixed_read_compress():
pd.read_hdf('test_fixed_compress.hdf', 'test')
+
def test_hdf_table_write(df):
df.to_hdf('test_table.hdf', 'test', mode='w', format='table')
+
def test_hdf_table_read():
pd.read_hdf('test_table.hdf', 'test')
+
def test_hdf_table_write_compress(df):
- df.to_hdf('test_table_compress.hdf', 'test', mode='w', complib='blosc', format='table')
+ df.to_hdf('test_table_compress.hdf', 'test', mode='w',
+ complib='blosc', format='table')
+
def test_hdf_table_read_compress():
pd.read_hdf('test_table_compress.hdf', 'test')
+
def test_csv_write(df):
df.to_csv('test.csv', mode='w')
+
def test_csv_read():
pd.read_csv('test.csv', index_col=0)
+
def test_feather_write(df):
df.to_feather('test.feather')
+
def test_feather_read():
pd.read_feather('test.feather')
+
def test_pickle_write(df):
df.to_pickle('test.pkl')
+
def test_pickle_read():
pd.read_pickle('test.pkl')
+
def test_pickle_write_compress(df):
df.to_pickle('test.pkl.compress', compression='xz')
+
def test_pickle_read_compress():
pd.read_pickle('test.pkl.compress', compression='xz')
diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst
index e4b5578af15f0..4864637691607 100644
--- a/doc/source/missing_data.rst
+++ b/doc/source/missing_data.rst
@@ -696,9 +696,8 @@ You can also operate on the DataFrame in place:
.. code-block:: python
- s = pd.Series([True, False, True])
- s.replace({'a string': 'new value', True: False}) # raises
-
+ >>> s = pd.Series([True, False, True])
+ >>> s.replace({'a string': 'new value', True: False}) # raises
TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'
will raise a ``TypeError`` because one of the ``dict`` keys is not of the
@@ -728,7 +727,7 @@ rules introduced in the table below.
:header: "data type", "Cast to"
:widths: 40, 40
- integer, float
+ integer, float
boolean, object
float, no cast
object, no cast
diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst
index 7d9925d800441..6163b6f2ae89a 100644
--- a/doc/source/reshaping.rst
+++ b/doc/source/reshaping.rst
@@ -45,13 +45,19 @@ For the curious here is how the above ``DataFrame`` was created:
.. code-block:: python
- import pandas.util.testing as tm; tm.N = 3
+ import pandas.util.testing as tm
+
+ tm.N = 3
+
+
def unpivot(frame):
N, K = frame.shape
- data = {'value' : frame.values.ravel('F'),
- 'variable' : np.asarray(frame.columns).repeat(N),
- 'date' : np.tile(np.asarray(frame.index), K)}
+ data = {'value': frame.values.ravel('F'),
+ 'variable': np.asarray(frame.columns).repeat(N),
+ 'date': np.tile(np.asarray(frame.index), K)}
return pd.DataFrame(data, columns=['date', 'variable', 'value'])
+
+
df = unpivot(tm.makeTimeDataFrame())
To select out everything for variable ``A`` we could do:
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index a52c80106f100..42fd356bbe65a 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -898,7 +898,7 @@ custom date increment logic, such as adding business days:
.. code-block:: python
class BDay(DateOffset):
- """DateOffset increments between business days"""
+ """DateOffset increments between business days"""
def apply(self, other):
...
@@ -2133,7 +2133,8 @@ To convert from an ``int64`` based YYYYMMDD representation.
s
def conv(x):
- return pd.Period(year = x // 10000, month = x//100 % 100, day = x%100, freq='D')
+ return pd.Period(year=x // 10000, month=x // 100 % 100,
+ day=x % 100, freq='D')
s.apply(conv)
s.apply(conv)[2]
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py
index bc91372e3ac7d..6694737737562 100644
--- a/pandas/core/accessor.py
+++ b/pandas/core/accessor.py
@@ -204,7 +204,8 @@ def decorator(accessor):
.. code-block:: python
- def __init__(self, pandas_object):
+ def __init__(self, pandas_object): # noqa: E999
+ ...
For consistency with pandas methods, you should raise an ``AttributeError``
if the data passed to your accessor has an incorrect dtype.
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index b2daec327d618..222873cd7f81a 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -760,9 +760,10 @@ def _interp_limit(invalid, fw_limit, bw_limit):
.. code-block:: python
- for x in np.where(invalid)[0]:
- if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():
- yield x
+ def _interp_limit(invalid, fw_limit, bw_limit):
+ for x in np.where(invalid)[0]:
+ if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():
+ yield x
"""
# handle forward first; the backward direction is the same except
# 1. operate on the reversed array
diff --git a/setup.cfg b/setup.cfg
index 17b88d084ebf6..4726a0ddb2fb2 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -30,6 +30,12 @@ exclude =
versioneer.py,
env # exclude asv benchmark environments from linting
+[flake8-rst]
+ignore =
+ F821, # undefined name
+ W391, # blank line at end of file [Seems to be a bug (v0.4.1)]
+
+
[yapf]
based_on_style = pep8
split_before_named_assigns = false
| - [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Integrated [flake8-rst](https://github.com/kataev/flake8-rst) into `code_check.sh` in order to check code in .. code-blocks:: python directives
* fixed all issues found with version v0.4.1
- F821 undefined name ignored -- too many occurences for now
- W391 blank line at end of file -- 4 occurences where I had no idea how to fix
```
doc/source/contributing_docstring.rst:629:9: W391 blank line at end of file
doc/source/contributing_docstring.rst:725:9: W391 blank line at end of file
doc/source/contributing_docstring.rst:815:9: W391 blank line at end of file
doc/source/contributing_docstring.rst:911:9: W391 blank line at end of file
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/23381 | 2018-10-27T11:16:45Z | 2018-11-09T16:26:35Z | 2018-11-09T16:26:35Z | 2018-11-09T16:27:06Z |
CLN: Cleanup top-level pandas namespace | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 768868d585721..9eb2350322a81 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -943,6 +943,7 @@ Removal of prior version deprecations/changes
- :meth:`Index.repeat` and :meth:`MultiIndex.repeat` have renamed the ``n`` argument to ``repeats`` (:issue:`14645`)
- Removal of the previously deprecated ``as_indexer`` keyword completely from ``str.match()`` (:issue:`22356`, :issue:`6581`)
- Removed the ``pandas.formats.style`` shim for :class:`pandas.io.formats.style.Styler` (:issue:`16059`)
+- :func:`pandas.pnow`, :func:`pandas.match`, :func:`pandas.groupby`, :func:`pd.get_store`, ``pd.Expr``, and ``pd.Term`` have been removed (:issue:`15538`, :issue:`15940`)
- :meth:`Categorical.searchsorted` and :meth:`Series.searchsorted` have renamed the ``v`` argument to ``value`` (:issue:`14645`)
- :meth:`TimedeltaIndex.searchsorted`, :meth:`DatetimeIndex.searchsorted`, and :meth:`PeriodIndex.searchsorted` have renamed the ``key`` argument to ``value`` (:issue:`14645`)
- Removal of the previously deprecated module ``pandas.json`` (:issue:`19944`)
diff --git a/pandas/core/api.py b/pandas/core/api.py
index 32df317a602a9..ad35b647ac458 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -14,7 +14,7 @@
MultiIndex, IntervalIndex,
TimedeltaIndex, DatetimeIndex,
PeriodIndex, NaT)
-from pandas.core.indexes.period import Period, period_range, pnow
+from pandas.core.indexes.period import Period, period_range
from pandas.core.indexes.timedeltas import Timedelta, timedelta_range
from pandas.core.indexes.datetimes import Timestamp, date_range, bdate_range
from pandas.core.indexes.interval import Interval, interval_range
@@ -36,27 +36,6 @@
describe_option, option_context, options)
-# deprecation, xref #13790
-def match(*args, **kwargs):
-
- import warnings
- warnings.warn("pd.match() is deprecated and will be removed "
- "in a future version",
- FutureWarning, stacklevel=2)
- from pandas.core.algorithms import match
- return match(*args, **kwargs)
-
-
-def groupby(*args, **kwargs):
- import warnings
-
- warnings.warn("pd.groupby() is deprecated and will be removed; "
- "Please use the Series.groupby() or "
- "DataFrame.groupby() methods",
- FutureWarning, stacklevel=2)
- return args[0].groupby(*args[1:], **kwargs)
-
-
# Deprecation: xref gh-16747
class TimeGrouper(object):
diff --git a/pandas/core/computation/api.py b/pandas/core/computation/api.py
index a6fe5aae822df..31e8a4873b0ad 100644
--- a/pandas/core/computation/api.py
+++ b/pandas/core/computation/api.py
@@ -1,14 +1,3 @@
# flake8: noqa
from pandas.core.computation.eval import eval
-
-
-# deprecation, xref #13790
-def Expr(*args, **kwargs):
- import warnings
-
- warnings.warn("pd.Expr is deprecated as it is not "
- "applicable to user code",
- FutureWarning, stacklevel=2)
- from pandas.core.computation.expr import Expr
- return Expr(*args, **kwargs)
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index d23d56cba98ae..e59e696e98e51 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -982,14 +982,6 @@ def base(self):
PeriodIndex._add_datetimelike_methods()
-def pnow(freq=None):
- # deprecation, xref #13790
- warnings.warn("pd.pnow() and pandas.core.indexes.period.pnow() "
- "are deprecated. Please use Period.now()",
- FutureWarning, stacklevel=2)
- return Period.now(freq=freq)
-
-
def period_range(start=None, end=None, periods=None, freq='D', name=None):
"""
Return a fixed frequency PeriodIndex, with day (calendar) as the default
diff --git a/pandas/io/api.py b/pandas/io/api.py
index 496a00126de87..8c8d7cf73b37a 100644
--- a/pandas/io/api.py
+++ b/pandas/io/api.py
@@ -14,20 +14,7 @@
from pandas.io.parquet import read_parquet
from pandas.io.parsers import read_csv, read_fwf, read_table
from pandas.io.pickle import read_pickle, to_pickle
-from pandas.io.pytables import HDFStore, get_store, read_hdf
+from pandas.io.pytables import HDFStore, read_hdf
from pandas.io.sas import read_sas
from pandas.io.sql import read_sql, read_sql_query, read_sql_table
from pandas.io.stata import read_stata
-
-
-# deprecation, xref #13790
-def Term(*args, **kwargs):
- import warnings
-
- warnings.warn("pd.Term is deprecated as it is not "
- "applicable to user code. Instead use in-line "
- "string expressions in the where clause when "
- "searching in HDFStore",
- FutureWarning, stacklevel=2)
- from pandas.io.pytables import Term
- return Term(*args, **kwargs)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index f9595af711621..0830aece8c6f4 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1416,19 +1416,6 @@ def _read_group(self, group, **kwargs):
return s.read(**kwargs)
-def get_store(path, **kwargs):
- """ Backwards compatible alias for ``HDFStore``
- """
- warnings.warn(
- "get_store is deprecated and be "
- "removed in a future version\n"
- "HDFStore(path, **kwargs) is the replacement",
- FutureWarning,
- stacklevel=6)
-
- return HDFStore(path, **kwargs)
-
-
class TableIterator(object):
""" define the iteration interface on a table
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 1a234cdfe3518..e81ce2b4b23e4 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -49,7 +49,7 @@ class TestPDApi(Base):
'TimedeltaIndex', 'Timestamp', 'Interval', 'IntervalIndex']
# these are already deprecated; awaiting removal
- deprecated_classes = ['TimeGrouper', 'Expr', 'Term']
+ deprecated_classes = ['TimeGrouper']
# these should be deprecated in the future
deprecated_classes_in_future = ['Panel']
@@ -89,8 +89,7 @@ class TestPDApi(Base):
deprecated_funcs_in_future = []
# these are already deprecated; awaiting removal
- deprecated_funcs = ['pnow', 'match', 'groupby', 'get_store',
- 'plot_params', 'scatter_matrix']
+ deprecated_funcs = ['plot_params', 'scatter_matrix']
def test_api(self):
@@ -131,46 +130,11 @@ class TestTopLevelDeprecations(object):
# top-level API deprecations
# GH 13790
- def test_pnow(self):
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- pd.pnow(freq='M')
-
- def test_term(self):
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- pd.Term('index>=date')
-
- def test_expr(self):
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- pd.Expr('2>1')
-
- def test_match(self):
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- pd.match([1, 2, 3], [1])
-
- def test_groupby(self):
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- pd.groupby(pd.Series([1, 2, 3]), [1, 1, 1])
-
def test_TimeGrouper(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.TimeGrouper(freq='D')
- # GH 15940
-
- def test_get_store(self):
- pytest.importorskip('tables')
- with tm.ensure_clean() as path:
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- s = pd.get_store(path)
- s.close()
-
class TestParser(object):
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index 4e9da92edcf5e..9dfb493cb129c 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -32,7 +32,7 @@
tables = pytest.importorskip('tables')
from pandas.io import pytables as pytables # noqa:E402
from pandas.io.pytables import (TableIterator, # noqa:E402
- HDFStore, get_store, Term, read_hdf,
+ HDFStore, Term, read_hdf,
PossibleDataLossError, ClosedFileError)
@@ -146,32 +146,6 @@ def teardown_method(self, method):
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class TestHDFStore(Base):
- def test_factory_fun(self):
- path = create_tempfile(self.path)
- try:
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- with get_store(path) as tbl:
- raise ValueError('blah')
- except ValueError:
- pass
- finally:
- safe_remove(path)
-
- try:
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- with get_store(path) as tbl:
- tbl['a'] = tm.makeDataFrame()
-
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- with get_store(path) as tbl:
- assert len(tbl) == 1
- assert type(tbl['a']) == DataFrame
- finally:
- safe_remove(self.path)
-
def test_context(self):
path = create_tempfile(self.path)
try:
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py
index c4c9a5f8452de..e360500d443ea 100644
--- a/pandas/tests/scalar/period/test_period.py
+++ b/pandas/tests/scalar/period/test_period.py
@@ -843,13 +843,6 @@ def test_properties_secondly(self):
assert Period(freq='Min', year=2012, month=2, day=1, hour=0,
minute=0, second=0).days_in_month == 29
- def test_pnow(self):
-
- # deprecation, xref #13790
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- period.pnow('D')
-
def test_constructor_corner(self):
expected = Period('2007-01', freq='2M')
assert Period(year=2007, month=1, freq='2M') == expected
| Drops the following:
* `pd.pnow`
* `pd.match`
* `pd.groupby`
* `pd.get_store`
As well as:
* `pd.Expr`
* `pd.Term`
xref #15538.
xref #15940.
| https://api.github.com/repos/pandas-dev/pandas/pulls/23380 | 2018-10-27T10:40:45Z | 2018-10-28T02:40:51Z | 2018-10-28T02:40:51Z | 2018-10-28T05:20:52Z |
CLN: Remove pd.parser, lib, and tslib | diff --git a/ci/travis-27.yaml b/ci/travis-27.yaml
index f079ac309b97c..8955bea1fc010 100644
--- a/ci/travis-27.yaml
+++ b/ci/travis-27.yaml
@@ -35,7 +35,7 @@ dependencies:
- s3fs
- scipy
- sqlalchemy=0.9.6
- - xarray=0.8.0
+ - xarray=0.9.6
- xlrd=0.9.2
- xlsxwriter=0.5.2
- xlwt=0.7.5
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 768868d585721..5a0274f9bdf86 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -944,6 +944,7 @@ Removal of prior version deprecations/changes
- Removal of the previously deprecated ``as_indexer`` keyword completely from ``str.match()`` (:issue:`22356`, :issue:`6581`)
- Removed the ``pandas.formats.style`` shim for :class:`pandas.io.formats.style.Styler` (:issue:`16059`)
- :meth:`Categorical.searchsorted` and :meth:`Series.searchsorted` have renamed the ``v`` argument to ``value`` (:issue:`14645`)
+- ``pandas.parser``, ``pandas.lib``, and ``pandas.tslib`` have been removed (:issue:`15537`)
- :meth:`TimedeltaIndex.searchsorted`, :meth:`DatetimeIndex.searchsorted`, and :meth:`PeriodIndex.searchsorted` have renamed the ``key`` argument to ``value`` (:issue:`14645`)
- Removal of the previously deprecated module ``pandas.json`` (:issue:`19944`)
- :meth:`SparseArray.get_values` and :meth:`SparseArray.to_dense` have dropped the ``fill`` parameter (:issue:`14686`)
diff --git a/pandas/__init__.py b/pandas/__init__.py
index e446782d9665e..af14319419c42 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -58,24 +58,6 @@
from pandas.util._tester import test
import pandas.testing
-# extension module deprecations
-from pandas.util._depr_module import _DeprecatedModule
-
-parser = _DeprecatedModule(deprmod='pandas.parser',
- removals=['na_values'],
- moved={'CParserError': 'pandas.errors.ParserError'})
-lib = _DeprecatedModule(deprmod='pandas.lib', deprmodto=False,
- moved={'Timestamp': 'pandas.Timestamp',
- 'Timedelta': 'pandas.Timedelta',
- 'NaT': 'pandas.NaT',
- 'infer_dtype': 'pandas.api.types.infer_dtype'})
-tslib = _DeprecatedModule(deprmod='pandas.tslib',
- moved={'Timestamp': 'pandas.Timestamp',
- 'Timedelta': 'pandas.Timedelta',
- 'NaT': 'pandas.NaT',
- 'NaTType': 'type(pandas.NaT)',
- 'OutOfBoundsDatetime': 'pandas.errors.OutOfBoundsDatetime'})
-
# use the closest tagged version if possible
from ._version import get_versions
v = get_versions()
diff --git a/pandas/lib.py b/pandas/lib.py
deleted file mode 100644
index 859a78060fcc1..0000000000000
--- a/pandas/lib.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# flake8: noqa
-
-import warnings
-warnings.warn("The pandas.lib module is deprecated and will be "
- "removed in a future version. These are private functions "
- "and can be accessed from pandas._libs.lib instead",
- FutureWarning, stacklevel=2)
-from pandas._libs.lib import *
diff --git a/pandas/parser.py b/pandas/parser.py
deleted file mode 100644
index f43a408c943d0..0000000000000
--- a/pandas/parser.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# flake8: noqa
-
-import warnings
-warnings.warn("The pandas.parser module is deprecated and will be "
- "removed in a future version. Please import from "
- "pandas.io.parser instead", FutureWarning, stacklevel=2)
-from pandas._libs.parsers import na_values
-from pandas.io.common import CParserError
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 1a234cdfe3518..0d46f7745d43c 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -34,7 +34,7 @@ class TestPDApi(Base):
'util', 'options', 'io']
# these are already deprecated; awaiting removal
- deprecated_modules = ['parser', 'lib', 'tslib']
+ deprecated_modules = []
# misc
misc = ['IndexSlice', 'NaT']
@@ -172,27 +172,6 @@ def test_get_store(self):
s.close()
-class TestParser(object):
-
- @pytest.mark.filterwarnings("ignore")
- def test_deprecation_access_func(self):
- pd.parser.na_values
-
-
-class TestLib(object):
-
- @pytest.mark.filterwarnings("ignore")
- def test_deprecation_access_func(self):
- pd.lib.infer_dtype('foo')
-
-
-class TestTSLib(object):
-
- @pytest.mark.filterwarnings("ignore")
- def test_deprecation_access_func(self):
- pd.tslib.Timestamp('20160101')
-
-
class TestTypes(object):
def test_deprecation_access_func(self):
diff --git a/pandas/tests/api/test_types.py b/pandas/tests/api/test_types.py
index ed80c1414dbaa..4ea501dacddf3 100644
--- a/pandas/tests/api/test_types.py
+++ b/pandas/tests/api/test_types.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-import sys
import pytest
from pandas.api import types
@@ -53,16 +52,3 @@ def test_deprecated_from_api_types(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
getattr(types, t)(1)
-
-
-def test_moved_infer_dtype():
- # del from sys.modules to ensure we try to freshly load.
- # if this was imported from another test previously, we would
- # not see the warning, since the import is otherwise cached.
- sys.modules.pop("pandas.lib", None)
-
- with tm.assert_produces_warning(FutureWarning):
- import pandas.lib
-
- e = pandas.lib.infer_dtype('foo')
- assert e is not None
diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py
index 76e003c463e7d..fbc0faa4c929a 100644
--- a/pandas/tests/test_errors.py
+++ b/pandas/tests/test_errors.py
@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
import pytest
-from warnings import catch_warnings, simplefilter
import pandas # noqa
import pandas as pd
from pandas.errors import AbstractMethodError
@@ -47,13 +46,6 @@ def test_error_rename():
except CParserError:
pass
- with catch_warnings(record=True):
- simplefilter("ignore")
- try:
- raise ParserError()
- except pd.parser.CParserError:
- pass
-
class Foo(object):
@classmethod
diff --git a/pandas/tslib.py b/pandas/tslib.py
deleted file mode 100644
index fc4a1ccb5da00..0000000000000
--- a/pandas/tslib.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# flake8: noqa
-
-import warnings
-warnings.warn("The pandas.tslib module is deprecated and will be "
- "removed in a future version.", FutureWarning, stacklevel=2)
-from pandas._libs.tslibs import Timestamp, Timedelta, OutOfBoundsDatetime
-from pandas._libs.tslibs.nattype import NaT, NaTType
| Title is self-explanatory.
xref #15537. | https://api.github.com/repos/pandas-dev/pandas/pulls/23378 | 2018-10-27T09:25:47Z | 2018-10-28T02:33:58Z | 2018-10-28T02:33:58Z | 2018-10-28T05:19:32Z |
CLN: Remove the .consolidate method | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 768868d585721..14244fb2fa7b7 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -945,6 +945,7 @@ Removal of prior version deprecations/changes
- Removed the ``pandas.formats.style`` shim for :class:`pandas.io.formats.style.Styler` (:issue:`16059`)
- :meth:`Categorical.searchsorted` and :meth:`Series.searchsorted` have renamed the ``v`` argument to ``value`` (:issue:`14645`)
- :meth:`TimedeltaIndex.searchsorted`, :meth:`DatetimeIndex.searchsorted`, and :meth:`PeriodIndex.searchsorted` have renamed the ``key`` argument to ``value`` (:issue:`14645`)
+- :meth:`DataFrame.consolidate` and :meth:`Series.consolidate` have been removed (:issue:`15501`)
- Removal of the previously deprecated module ``pandas.json`` (:issue:`19944`)
- :meth:`SparseArray.get_values` and :meth:`SparseArray.to_dense` have dropped the ``fill`` parameter (:issue:`14686`)
- :meth:`SparseSeries.to_dense` has dropped the ``sparse_only`` parameter (:issue:`14686`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index a80b6df703df0..44497c5dcb377 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -117,7 +117,7 @@ class NDFrame(PandasObject, SelectionMixin):
_internal_names_set = set(_internal_names)
_accessors = frozenset([])
_deprecations = frozenset(['as_blocks', 'blocks',
- 'consolidate', 'convert_objects', 'is_copy'])
+ 'convert_objects', 'is_copy'])
_metadata = []
_is_copy = None
@@ -4722,18 +4722,6 @@ def _consolidate(self, inplace=False):
cons_data = self._protect_consolidate(f)
return self._constructor(cons_data).__finalize__(self)
- def consolidate(self, inplace=False):
- """Compute NDFrame with "consolidated" internals (data of each dtype
- grouped together in a single ndarray).
-
- .. deprecated:: 0.20.0
- Consolidate will be an internal implementation only.
- """
- # 15483
- warnings.warn("consolidate is deprecated and will be removed in a "
- "future release.", FutureWarning, stacklevel=2)
- return self._consolidate(inplace)
-
@property
def _is_mixed_type(self):
f = lambda: self._data.is_mixed_type
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index 5f1d4954521ed..8a7d7d790a1b4 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -54,11 +54,6 @@ def test_consolidate(self, float_frame):
float_frame._consolidate(inplace=True)
assert len(float_frame._data.blocks) == 1
- def test_consolidate_deprecation(self, float_frame):
- float_frame['E'] = 7
- with tm.assert_produces_warning(FutureWarning):
- float_frame.consolidate()
-
def test_consolidate_inplace(self, float_frame):
frame = float_frame.copy() # noqa
| Title is self-explanatory.
xref #15501. | https://api.github.com/repos/pandas-dev/pandas/pulls/23377 | 2018-10-27T08:39:00Z | 2018-10-28T02:42:05Z | 2018-10-28T02:42:05Z | 2018-10-28T05:21:20Z |
CLN: Remove pandas.tools module | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 51c398518c153..bf6a07f52e59d 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -950,6 +950,7 @@ Removal of prior version deprecations/changes
- :meth:`TimedeltaIndex.searchsorted`, :meth:`DatetimeIndex.searchsorted`, and :meth:`PeriodIndex.searchsorted` have renamed the ``key`` argument to ``value`` (:issue:`14645`)
- :meth:`DataFrame.consolidate` and :meth:`Series.consolidate` have been removed (:issue:`15501`)
- Removal of the previously deprecated module ``pandas.json`` (:issue:`19944`)
+- The module ``pandas.tools`` has been removed (:issue:`15358`, :issue:`16005`)
- :meth:`SparseArray.get_values` and :meth:`SparseArray.to_dense` have dropped the ``fill`` parameter (:issue:`14686`)
- :meth:`DataFrame.sortlevel` and :meth:`Series.sortlevel` have been removed (:issue:`15099`)
- :meth:`SparseSeries.to_dense` has dropped the ``sparse_only`` parameter (:issue:`14686`)
diff --git a/pandas/__init__.py b/pandas/__init__.py
index af14319419c42..e86ed86fda74f 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -45,14 +45,6 @@
from pandas.core.computation.api import *
from pandas.core.reshape.api import *
-# deprecate tools.plotting, plot_params and scatter_matrix on the top namespace
-import pandas.tools.plotting
-plot_params = pandas.plotting._style._Options(deprecated=True)
-# do not import deprecate to top namespace
-scatter_matrix = pandas.util._decorators.deprecate(
- 'pandas.scatter_matrix', pandas.plotting.scatter_matrix, '0.20.0',
- 'pandas.plotting.scatter_matrix')
-
from pandas.util._print_versions import show_versions
from pandas.io.api import *
from pandas.util._tester import test
diff --git a/pandas/plotting/_style.py b/pandas/plotting/_style.py
index 9bc12d22e1685..da26c0f8fa7e2 100644
--- a/pandas/plotting/_style.py
+++ b/pandas/plotting/_style.py
@@ -110,14 +110,7 @@ def __init__(self, deprecated=False):
# self['xaxis.compat'] = False
super(_Options, self).__setitem__('xaxis.compat', False)
- def _warn_if_deprecated(self):
- if self._deprecated:
- warnings.warn("'pandas.plot_params' is deprecated. Use "
- "'pandas.plotting.plot_params' instead",
- FutureWarning, stacklevel=3)
-
def __getitem__(self, key):
- self._warn_if_deprecated()
key = self._get_canonical_key(key)
if key not in self:
raise ValueError(
@@ -125,7 +118,6 @@ def __getitem__(self, key):
return super(_Options, self).__getitem__(key)
def __setitem__(self, key, value):
- self._warn_if_deprecated()
key = self._get_canonical_key(key)
return super(_Options, self).__setitem__(key, value)
@@ -148,7 +140,6 @@ def reset(self):
-------
None
"""
- self._warn_if_deprecated()
self.__init__()
def _get_canonical_key(self, key):
@@ -160,7 +151,6 @@ def use(self, key, value):
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
- self._warn_if_deprecated()
old_value = self[key]
try:
self[key] = value
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index bb544d2ee81fd..9bf26ccec447b 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -29,7 +29,7 @@ class TestPDApi(Base):
# top-level sub-packages
lib = ['api', 'compat', 'core', 'errors', 'pandas',
- 'plotting', 'test', 'testing', 'tools', 'tseries',
+ 'plotting', 'test', 'testing', 'tseries',
'util', 'options', 'io']
# these are already deprecated; awaiting removal
@@ -88,7 +88,7 @@ class TestPDApi(Base):
deprecated_funcs_in_future = []
# these are already deprecated; awaiting removal
- deprecated_funcs = ['plot_params', 'scatter_matrix']
+ deprecated_funcs = []
def test_api(self):
diff --git a/pandas/tests/plotting/test_deprecated.py b/pandas/tests/plotting/test_deprecated.py
deleted file mode 100644
index a45b17ec98261..0000000000000
--- a/pandas/tests/plotting/test_deprecated.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# coding: utf-8
-
-import string
-
-import pandas as pd
-import pandas.util.testing as tm
-import pandas.util._test_decorators as td
-import pytest
-
-from numpy.random import randn
-
-import pandas.tools.plotting as plotting
-
-from pandas.tests.plotting.common import TestPlotBase
-
-
-"""
-Test cases for plot functions imported from deprecated
-pandas.tools.plotting
-"""
-
-
-@td.skip_if_no_mpl
-class TestDeprecatedNameSpace(TestPlotBase):
-
- @pytest.mark.slow
- @td.skip_if_no_scipy
- def test_scatter_plot_legacy(self):
- df = pd.DataFrame(randn(100, 2))
-
- with tm.assert_produces_warning(FutureWarning):
- plotting.scatter_matrix(df)
-
- with tm.assert_produces_warning(FutureWarning):
- pd.scatter_matrix(df)
-
- @pytest.mark.slow
- def test_boxplot_deprecated(self):
- df = pd.DataFrame(randn(6, 4),
- index=list(string.ascii_letters[:6]),
- columns=['one', 'two', 'three', 'four'])
- df['indic'] = ['foo', 'bar'] * 3
-
- with tm.assert_produces_warning(FutureWarning):
- plotting.boxplot(df, column=['one', 'two'],
- by='indic')
-
- @pytest.mark.slow
- def test_radviz_deprecated(self, iris):
- with tm.assert_produces_warning(FutureWarning):
- plotting.radviz(frame=iris, class_column='Name')
-
- @pytest.mark.slow
- def test_plot_params(self):
-
- with tm.assert_produces_warning(FutureWarning):
- pd.plot_params['xaxis.compat'] = True
diff --git a/pandas/tools/__init__.py b/pandas/tools/__init__.py
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
deleted file mode 100644
index cd58aa2c7f923..0000000000000
--- a/pandas/tools/merge.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import warnings
-
-
-# back-compat of pseudo-public API
-def concat_wrap():
-
- def wrapper(*args, **kwargs):
- warnings.warn("pandas.tools.merge.concat is deprecated. "
- "import from the public API: "
- "pandas.concat instead",
- FutureWarning, stacklevel=3)
- import pandas as pd
- return pd.concat(*args, **kwargs)
- return wrapper
-
-
-concat = concat_wrap()
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
deleted file mode 100644
index a68da67a219e2..0000000000000
--- a/pandas/tools/plotting.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import sys
-import warnings
-
-import pandas.plotting as _plotting
-
-# back-compat of public API
-# deprecate these functions
-m = sys.modules['pandas.tools.plotting']
-for t in [t for t in dir(_plotting) if not t.startswith('_')]:
-
- def outer(t=t):
-
- def wrapper(*args, **kwargs):
- warnings.warn("'pandas.tools.plotting.{t}' is deprecated, "
- "import 'pandas.plotting.{t}' instead.".format(t=t),
- FutureWarning, stacklevel=2)
- return getattr(_plotting, t)(*args, **kwargs)
- return wrapper
-
- setattr(m, t, outer(t))
| Title is self-explanatory.
xref #15358.
xref #16005. | https://api.github.com/repos/pandas-dev/pandas/pulls/23376 | 2018-10-27T07:46:12Z | 2018-10-28T13:42:56Z | 2018-10-28T13:42:56Z | 2018-10-28T18:47:57Z |
CLN: Drop .sortlevel method | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 768868d585721..89c9dc17f5311 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -947,6 +947,7 @@ Removal of prior version deprecations/changes
- :meth:`TimedeltaIndex.searchsorted`, :meth:`DatetimeIndex.searchsorted`, and :meth:`PeriodIndex.searchsorted` have renamed the ``key`` argument to ``value`` (:issue:`14645`)
- Removal of the previously deprecated module ``pandas.json`` (:issue:`19944`)
- :meth:`SparseArray.get_values` and :meth:`SparseArray.to_dense` have dropped the ``fill`` parameter (:issue:`14686`)
+- :meth:`DataFrame.sortlevel` and :meth:`Series.sortlevel` have been removed (:issue:`15099`)
- :meth:`SparseSeries.to_dense` has dropped the ``sparse_only`` parameter (:issue:`14686`)
.. _whatsnew_0240.performance:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 61721ce4c82e7..a58d34574d28d 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -361,7 +361,7 @@ def _constructor(self):
_constructor_sliced = Series
_deprecations = NDFrame._deprecations | frozenset(
- ['sortlevel', 'get_value', 'set_value', 'from_csv', 'from_items'])
+ ['get_value', 'set_value', 'from_csv', 'from_items'])
_accessors = set()
@property
@@ -4645,40 +4645,6 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
else:
return self._constructor(new_data).__finalize__(self)
- def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
- sort_remaining=True):
- """Sort multilevel index by chosen axis and primary level. Data will be
- lexicographically sorted by the chosen level followed by the other
- levels (in order).
-
- .. deprecated:: 0.20.0
- Use :meth:`DataFrame.sort_index`
-
-
- Parameters
- ----------
- level : int
- axis : {0 or 'index', 1 or 'columns'}, default 0
- ascending : boolean, default True
- inplace : boolean, default False
- Sort the DataFrame without creating a new instance
- sort_remaining : boolean, default True
- Sort by the other levels too.
-
- Returns
- -------
- sorted : DataFrame
-
- See Also
- --------
- DataFrame.sort_index(level=...)
-
- """
- warnings.warn("sortlevel is deprecated, use sort_index(level= ...)",
- FutureWarning, stacklevel=2)
- return self.sort_index(level=level, axis=axis, ascending=ascending,
- inplace=inplace, sort_remaining=sort_remaining)
-
def nlargest(self, n, columns, keep='first'):
"""
Return the first `n` rows ordered by `columns` in descending order.
diff --git a/pandas/core/series.py b/pandas/core/series.py
index d03a88ea78f6f..64d2e803dc8e1 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -145,7 +145,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
_metadata = ['name']
_accessors = {'dt', 'cat', 'str', 'sparse'}
_deprecations = generic.NDFrame._deprecations | frozenset(
- ['asobject', 'sortlevel', 'reshape', 'get_value', 'set_value',
+ ['asobject', 'reshape', 'get_value', 'set_value',
'from_csv', 'valid'])
# Override cache_readonly bc Series is mutable
@@ -2962,33 +2962,6 @@ def nsmallest(self, n=5, keep='first'):
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest()
- def sortlevel(self, level=0, ascending=True, sort_remaining=True):
- """Sort Series with MultiIndex by chosen level. Data will be
- lexicographically sorted by the chosen level followed by the other
- levels (in order),
-
- .. deprecated:: 0.20.0
- Use :meth:`Series.sort_index`
-
- Parameters
- ----------
- level : int or level name, default None
- ascending : bool, default True
-
- Returns
- -------
- sorted : Series
-
- See Also
- --------
- Series.sort_index(level=...)
-
- """
- warnings.warn("sortlevel is deprecated, use sort_index(level=...)",
- FutureWarning, stacklevel=2)
- return self.sort_index(level=level, ascending=ascending,
- sort_remaining=sort_remaining)
-
def swaplevel(self, i=-2, j=-1, copy=True):
"""
Swap levels i and j in a MultiIndex
diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/test_sorting.py
index 41b11d9c15f35..b99e8983b5ba1 100644
--- a/pandas/tests/frame/test_sorting.py
+++ b/pandas/tests/frame/test_sorting.py
@@ -21,14 +21,6 @@
class TestDataFrameSorting(TestData):
- def test_sort(self):
- frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
- columns=['A', 'B', 'C', 'D'])
-
- # see gh-9816
- with tm.assert_produces_warning(FutureWarning):
- frame.sortlevel()
-
def test_sort_values(self):
frame = DataFrame([[1, 1, 2], [3, 1, 0], [4, 5, 6]],
index=[1, 2, 3], columns=list('ABC'))
diff --git a/pandas/tests/series/test_sorting.py b/pandas/tests/series/test_sorting.py
index 13e0d1b12c372..b97a8b762da88 100644
--- a/pandas/tests/series/test_sorting.py
+++ b/pandas/tests/series/test_sorting.py
@@ -15,13 +15,6 @@
class TestSeriesSorting(TestData):
- def test_sortlevel_deprecated(self):
- ts = self.ts.copy()
-
- # see gh-9816
- with tm.assert_produces_warning(FutureWarning):
- ts.sortlevel()
-
def test_sort_values(self):
# check indexes are reordered corresponding with the values
| Title is self-explanatory.
xref #15099. | https://api.github.com/repos/pandas-dev/pandas/pulls/23375 | 2018-10-27T06:39:46Z | 2018-10-28T02:43:35Z | 2018-10-28T02:43:35Z | 2018-10-28T05:21:55Z |
CLN: Rename raise_on_error to errors for .dtype | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 768868d585721..1056d200a7e63 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -948,6 +948,7 @@ Removal of prior version deprecations/changes
- Removal of the previously deprecated module ``pandas.json`` (:issue:`19944`)
- :meth:`SparseArray.get_values` and :meth:`SparseArray.to_dense` have dropped the ``fill`` parameter (:issue:`14686`)
- :meth:`SparseSeries.to_dense` has dropped the ``sparse_only`` parameter (:issue:`14686`)
+- :meth:`DataFrame.astype` and :meth:`Series.astype` have renamed the ``raise_on_error`` argument to ``errors`` (:issue:`14967`)
.. _whatsnew_0240.performance:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index a80b6df703df0..e6a97ec7f530a 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -53,8 +53,7 @@
isidentifier, set_function_name, cPickle as pkl)
from pandas.core.ops import _align_method_FRAME
import pandas.core.nanops as nanops
-from pandas.util._decorators import (Appender, Substitution,
- deprecate_kwarg)
+from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core import config
@@ -5148,8 +5147,6 @@ def _to_dict_of_blocks(self, copy=True):
return {k: self._constructor(v).__finalize__(self)
for k, v, in self._data.to_dict(copy=copy).items()}
- @deprecate_kwarg(old_arg_name='raise_on_error', new_arg_name='errors',
- mapping={True: 'raise', False: 'ignore'})
def astype(self, dtype, copy=True, errors='raise', **kwargs):
"""
Cast a pandas object to a specified dtype ``dtype``.
@@ -5173,9 +5170,6 @@ def astype(self, dtype, copy=True, errors='raise', **kwargs):
.. versionadded:: 0.20.0
- raise_on_error : raise on invalid input
- .. deprecated:: 0.20.0
- Use ``errors`` instead
kwargs : keyword arguments to pass on to the constructor
Returns
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index 2afaeea3755d0..2dbf3e9784749 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -830,9 +830,6 @@ def test_arg_for_errors_in_astype(self):
with pytest.raises(ValueError):
df.astype(np.float64, errors=True)
- with tm.assert_produces_warning(FutureWarning):
- df.astype(np.int8, raise_on_error=False)
-
df.astype(np.int8, errors='ignore')
@pytest.mark.parametrize('input_vals', [
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index 55a1afcb504e7..a0058bc42cefb 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -454,9 +454,6 @@ def test_arg_for_errors_in_astype(self):
with pytest.raises(ValueError):
s.astype(np.float64, errors=False)
- with tm.assert_produces_warning(FutureWarning):
- s.astype(np.int8, raise_on_error=True)
-
s.astype(np.int8, errors='raise')
def test_intercept_astype_object(self):
| Title is self-explanatory.
xref #14967.
| https://api.github.com/repos/pandas-dev/pandas/pulls/23374 | 2018-10-27T05:56:47Z | 2018-10-28T02:44:16Z | 2018-10-28T02:44:16Z | 2018-10-28T05:22:24Z |
STYLE: Specify bare exceptions in pandas/tests | diff --git a/.pep8speaks.yml b/.pep8speaks.yml
index ae1c92832bb15..cbcb098c47125 100644
--- a/.pep8speaks.yml
+++ b/.pep8speaks.yml
@@ -13,7 +13,6 @@ pycodestyle:
- W503, # line break before binary operator
- W504, # line break after binary operator
- E402, # module level import not at top of file
- - E722, # do not use bare except
- E731, # do not assign a lambda expression, use a def
- C406, # Unnecessary list literal - rewrite as a dict literal.
- C408, # Unnecessary dict call - rewrite as a literal.
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 47adc80204fcc..d88b5e9757423 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -586,7 +586,7 @@ def linkcode_resolve(domain, info):
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
- except:
+ except AttributeError:
return None
try:
@@ -595,14 +595,14 @@ def linkcode_resolve(domain, info):
fn = inspect.getsourcefile(inspect.unwrap(obj))
else:
fn = inspect.getsourcefile(obj)
- except:
+ except TypeError:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
- except:
+ except OSError:
lineno = None
if lineno:
diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py
index 5053c7f3d9875..f4d6fe428515e 100644
--- a/pandas/tests/indexing/common.py
+++ b/pandas/tests/indexing/common.py
@@ -156,7 +156,7 @@ def get_result(self, obj, method, key, axis):
with catch_warnings(record=True):
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
- except:
+ except AttributeError:
xp = getattr(obj, method).__getitem__(key)
return xp
@@ -219,7 +219,7 @@ def _print(result, error=None):
try:
xp = self.get_result(obj, method2, k2, a)
- except:
+ except Exception:
result = 'no comp'
_print(result)
return
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 0814df8240e13..7d26ea05b514a 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -70,7 +70,7 @@ def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == '...')[0][0]
- except:
+ except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
@@ -459,7 +459,7 @@ def test_to_string_repr_unicode(self):
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
- except:
+ except AttributeError:
pass
if not line.startswith('dtype:'):
assert len(line) == line_len
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index f7157fe13e14a..4a68719eedc9a 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -51,7 +51,7 @@ def safe_remove(path):
if path is not None:
try:
os.remove(path)
- except:
+ except OSError:
pass
@@ -59,7 +59,7 @@ def safe_close(store):
try:
if store is not None:
store.close()
- except:
+ except IOError:
pass
@@ -117,7 +117,7 @@ def _maybe_remove(store, key):
no content from previous tests using the same table name."""
try:
store.remove(key)
- except:
+ except (ValueError, KeyError):
pass
@@ -4601,7 +4601,7 @@ def do_copy(f, new_f=None, keys=None,
safe_close(tstore)
try:
os.close(fd)
- except:
+ except (OSError, ValueError):
pass
safe_remove(new_f)
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 6bb7800b72110..eeeb55cb8e70c 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -1815,6 +1815,7 @@ def test_default_type_conversion(self):
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_read_procedure(self):
+ import pymysql
# see GH7324. Although it is more an api test, it is added to the
# mysql tests as sqlite does not have stored procedures
df = DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]})
@@ -1833,7 +1834,7 @@ def test_read_procedure(self):
try:
r1 = connection.execute(proc) # noqa
trans.commit()
- except:
+ except pymysql.Error:
trans.rollback()
raise
@@ -2418,7 +2419,7 @@ def setup_class(cls):
# No real user should allow root access with a blank password.
pymysql.connect(host='localhost', user='root', passwd='',
db='pandas_nosetest')
- except:
+ except pymysql.Error:
pass
else:
return
@@ -2445,7 +2446,7 @@ def setup_method(self, request, datapath):
# No real user should allow root access with a blank password.
self.conn = pymysql.connect(host='localhost', user='root',
passwd='', db='pandas_nosetest')
- except:
+ except pymysql.Error:
pass
else:
return
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 2717b92e05a29..70d2c9080ab94 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -1376,7 +1376,7 @@ def f():
try:
df = f()
- except:
+ except ValueError:
pass
assert (df['foo', 'one'] == 0).all()
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index b06463d3c07aa..49dbccb82fac8 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -141,12 +141,12 @@ def _coerce_tds(targ, res):
if axis != 0 and hasattr(
targ, 'shape') and targ.ndim and targ.shape != res.shape:
res = np.split(res, [targ.shape[0]], axis=0)[0]
- except:
+ except (ValueError, IndexError):
targ, res = _coerce_tds(targ, res)
try:
tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
- except:
+ except AssertionError:
# handle timedelta dtypes
if hasattr(targ, 'dtype') and targ.dtype == 'm8[ns]':
@@ -167,11 +167,11 @@ def _coerce_tds(targ, res):
else:
try:
res = res.astype('c16')
- except:
+ except RuntimeError:
res = res.astype('f8')
try:
targ = targ.astype('c16')
- except:
+ except RuntimeError:
targ = targ.astype('f8')
# there should never be a case where numpy returns an object
# but nanops doesn't, so make that an exception
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 0e45fd6411ac0..6d5d07b00398c 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -335,13 +335,13 @@ def check_op(op, name):
for op in ops:
try:
check_op(getattr(operator, op), op)
- except:
+ except AttributeError:
pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
- except:
+ except AttributeError:
pprint_thing("Failing operation: %r" % 'div')
raise
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index 42f0cebea83a0..d128a66a182ba 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -2637,7 +2637,7 @@ def test_slice(self):
expected = Series([s[start:stop:step] if not isna(s) else NA
for s in values])
tm.assert_series_equal(result, expected)
- except:
+ except IndexError:
print('failed on %s:%s:%s' % (start, stop, step))
raise
diff --git a/setup.cfg b/setup.cfg
index 7212833435997..5132e6c5f79cd 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -17,7 +17,6 @@ ignore =
W503, # line break before binary operator
W504, # line break after binary operator
E402, # module level import not at top of file
- E722, # do not use bare except
E731, # do not assign a lambda expression, use a def
C406, # Unnecessary list literal - rewrite as a dict literal.
C408, # Unnecessary dict call - rewrite as a literal.
| - [x] closes #22872
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
The empty `except` statements now capture specific exceptions.
Following warnings are fixed:
```
pandas/tests/test_panel.py:338:13: E722 do not use bare 'except'
pandas/tests/test_panel.py:344:13: E722 do not use bare 'except'
pandas/tests/test_nanops.py:144:9: E722 do not use bare 'except'
pandas/tests/test_nanops.py:149:9: E722 do not use bare 'except'
pandas/tests/test_nanops.py:170:21: E722 do not use bare 'except'
pandas/tests/test_nanops.py:174:21: E722 do not use bare 'except'
pandas/tests/test_multilevel.py:1381:9: E722 do not use bare 'except'
pandas/tests/test_strings.py:2632:13: E722 do not use bare 'except'
pandas/tests/io/test_pytables.py:54:9: E722 do not use bare 'except'
pandas/tests/io/test_pytables.py:62:5: E722 do not use bare 'except'
pandas/tests/io/test_pytables.py:120:5: E722 do not use bare 'except'
pandas/tests/io/test_pytables.py:4624:21: E722 do not use bare 'except'
pandas/tests/io/test_sql.py:1793:9: E722 do not use bare 'except'
pandas/tests/io/test_sql.py:2378:9: E722 do not use bare 'except'
pandas/tests/io/test_sql.py:2405:9: E722 do not use bare 'except'
pandas/tests/io/formats/test_format.py:73:5: E722 do not use bare 'except'
pandas/tests/io/formats/test_format.py:455:13: E722 do not use bare 'except'
pandas/tests/indexing/common.py:154:13: E722 do not use bare 'except'
pandas/tests/indexing/common.py:217:17: E722 do not use bare 'except'
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/23370 | 2018-10-27T02:32:16Z | 2018-11-19T01:25:28Z | 2018-11-19T01:25:28Z | 2018-11-19T05:34:12Z |
Fix import format at pandas/tests/io/parser directory | diff --git a/pandas/tests/io/parser/c_parser_only.py b/pandas/tests/io/parser/c_parser_only.py
index 9dc7b070f889d..7769b62dc0206 100644
--- a/pandas/tests/io/parser/c_parser_only.py
+++ b/pandas/tests/io/parser/c_parser_only.py
@@ -11,14 +11,14 @@
import sys
import tarfile
-import pytest
import numpy as np
+import pytest
import pandas as pd
-import pandas.util.testing as tm
import pandas.util._test_decorators as td
+import pandas.util.testing as tm
from pandas import DataFrame
-from pandas.compat import StringIO, range, lrange
+from pandas.compat import StringIO, lrange, range
class CParserTests(object):
diff --git a/pandas/tests/io/parser/comment.py b/pandas/tests/io/parser/comment.py
index 9987a017cf985..c71f92d8fb1be 100644
--- a/pandas/tests/io/parser/comment.py
+++ b/pandas/tests/io/parser/comment.py
@@ -6,8 +6,8 @@
"""
import numpy as np
-import pandas.util.testing as tm
+import pandas.util.testing as tm
from pandas import DataFrame
from pandas.compat import StringIO
diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py
index 49e42786d6fb8..82557815ed5cf 100644
--- a/pandas/tests/io/parser/common.py
+++ b/pandas/tests/io/parser/common.py
@@ -1,26 +1,23 @@
# -*- coding: utf-8 -*-
+import codecs
import csv
import os
import platform
-import codecs
-
import re
import sys
-from datetime import datetime
from collections import OrderedDict
+from datetime import datetime
from io import TextIOWrapper
-import pytest
import numpy as np
-from pandas._libs.tslib import Timestamp
+import pytest
import pandas as pd
import pandas.util.testing as tm
-from pandas import DataFrame, Series, Index, MultiIndex
-from pandas import compat
-from pandas.compat import (StringIO, BytesIO, PY3,
- range, lrange, u)
+from pandas import DataFrame, Index, MultiIndex, Series, compat
+from pandas._libs.tslib import Timestamp
+from pandas.compat import PY3, BytesIO, StringIO, lrange, range, u
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
diff --git a/pandas/tests/io/parser/compression.py b/pandas/tests/io/parser/compression.py
index 5a28b6263f20f..5efed89663e0f 100644
--- a/pandas/tests/io/parser/compression.py
+++ b/pandas/tests/io/parser/compression.py
@@ -5,15 +5,16 @@
of the parsers defined in parsers.py
"""
+import bz2
+import gzip
+
import pytest
import pandas as pd
import pandas.compat as compat
-import pandas.util.testing as tm
import pandas.util._test_decorators as td
+import pandas.util.testing as tm
-import gzip
-import bz2
try:
lzma = compat.import_lzma()
except ImportError:
diff --git a/pandas/tests/io/parser/converters.py b/pandas/tests/io/parser/converters.py
index ae35d45591dc5..be14e7be0ab9b 100644
--- a/pandas/tests/io/parser/converters.py
+++ b/pandas/tests/io/parser/converters.py
@@ -7,15 +7,14 @@
from datetime import datetime
+import numpy as np
import pytest
-import numpy as np
import pandas as pd
import pandas.util.testing as tm
-
-from pandas._libs.tslib import Timestamp
from pandas import DataFrame, Index
-from pandas.compat import parse_date, StringIO, lmap
+from pandas._libs.tslib import Timestamp
+from pandas.compat import StringIO, lmap, parse_date
class ConverterTests(object):
diff --git a/pandas/tests/io/parser/dialect.py b/pandas/tests/io/parser/dialect.py
index f756fe71bf684..2a519a0083f40 100644
--- a/pandas/tests/io/parser/dialect.py
+++ b/pandas/tests/io/parser/dialect.py
@@ -7,12 +7,11 @@
import csv
+import pandas.util.testing as tm
from pandas import DataFrame
from pandas.compat import StringIO
from pandas.errors import ParserWarning
-import pandas.util.testing as tm
-
class DialectTests(object):
diff --git a/pandas/tests/io/parser/dtypes.py b/pandas/tests/io/parser/dtypes.py
index 8060ebf2fbcd4..00dfb616c18ca 100644
--- a/pandas/tests/io/parser/dtypes.py
+++ b/pandas/tests/io/parser/dtypes.py
@@ -5,13 +5,12 @@
for all of the parsers defined in parsers.py
"""
+import numpy as np
import pytest
-import numpy as np
import pandas as pd
import pandas.util.testing as tm
-
-from pandas import DataFrame, Series, Index, MultiIndex, Categorical
+from pandas import Categorical, DataFrame, Index, MultiIndex, Series
from pandas.compat import StringIO
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.errors import ParserWarning
diff --git a/pandas/tests/io/parser/header.py b/pandas/tests/io/parser/header.py
index ad3d4592bd599..ced59fe1696d6 100644
--- a/pandas/tests/io/parser/header.py
+++ b/pandas/tests/io/parser/header.py
@@ -7,11 +7,10 @@
from collections import namedtuple
+import numpy as np
import pytest
-import numpy as np
import pandas.util.testing as tm
-
from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO, lrange, u
diff --git a/pandas/tests/io/parser/index_col.py b/pandas/tests/io/parser/index_col.py
index ee9b210443636..973fa4c57545a 100644
--- a/pandas/tests/io/parser/index_col.py
+++ b/pandas/tests/io/parser/index_col.py
@@ -9,7 +9,6 @@
import pytest
import pandas.util.testing as tm
-
from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO
diff --git a/pandas/tests/io/parser/mangle_dupes.py b/pandas/tests/io/parser/mangle_dupes.py
index 6df69eb475bf7..d043f64460028 100644
--- a/pandas/tests/io/parser/mangle_dupes.py
+++ b/pandas/tests/io/parser/mangle_dupes.py
@@ -6,10 +6,9 @@
de-duplicated (if mangling requested) or ignored otherwise.
"""
-from pandas.compat import StringIO
-from pandas import DataFrame
-
import pandas.util.testing as tm
+from pandas import DataFrame
+from pandas.compat import StringIO
class DupeColumnTests(object):
diff --git a/pandas/tests/io/parser/multithread.py b/pandas/tests/io/parser/multithread.py
index 2aaef889db6de..dee7067ea8728 100644
--- a/pandas/tests/io/parser/multithread.py
+++ b/pandas/tests/io/parser/multithread.py
@@ -6,12 +6,13 @@
"""
from __future__ import division
+
from multiprocessing.pool import ThreadPool
import numpy as np
+
import pandas as pd
import pandas.util.testing as tm
-
from pandas import DataFrame
from pandas.compat import BytesIO, range
diff --git a/pandas/tests/io/parser/na_values.py b/pandas/tests/io/parser/na_values.py
index 29aed63e657fb..392f4de2ecaaf 100644
--- a/pandas/tests/io/parser/na_values.py
+++ b/pandas/tests/io/parser/na_values.py
@@ -5,13 +5,12 @@
parsing for all of the parsers defined in parsers.py
"""
-import pytest
import numpy as np
+import pytest
from numpy import nan
import pandas.io.common as com
import pandas.util.testing as tm
-
from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO, range
diff --git a/pandas/tests/io/parser/parse_dates.py b/pandas/tests/io/parser/parse_dates.py
index ae3c806ac1c8e..d893b91226976 100644
--- a/pandas/tests/io/parser/parse_dates.py
+++ b/pandas/tests/io/parser/parse_dates.py
@@ -5,23 +5,21 @@
parsers defined in parsers.py
"""
+from datetime import date, datetime
from distutils.version import LooseVersion
-from datetime import datetime, date
-import pytest
import numpy as np
-from pandas._libs.tslibs import parsing
-from pandas._libs.tslib import Timestamp
-
+import pytest
import pytz
+
import pandas as pd
+import pandas.io.date_converters as conv
import pandas.io.parsers as parsers
import pandas.util.testing as tm
-
-import pandas.io.date_converters as conv
-from pandas import DataFrame, Series, Index, DatetimeIndex, MultiIndex
-from pandas import compat
-from pandas.compat import parse_date, StringIO, lrange
+from pandas import DataFrame, DatetimeIndex, Index, MultiIndex, Series, compat
+from pandas._libs.tslib import Timestamp
+from pandas._libs.tslibs import parsing
+from pandas.compat import StringIO, lrange, parse_date
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.indexes.datetimes import date_range
diff --git a/pandas/tests/io/parser/python_parser_only.py b/pandas/tests/io/parser/python_parser_only.py
index c0616ebbab4a5..5d2f6b7231a5d 100644
--- a/pandas/tests/io/parser/python_parser_only.py
+++ b/pandas/tests/io/parser/python_parser_only.py
@@ -8,13 +8,13 @@
"""
import csv
+
import pytest
import pandas.util.testing as tm
-from pandas import DataFrame, Index
-from pandas import compat
+from pandas import DataFrame, Index, compat
+from pandas.compat import BytesIO, StringIO, u
from pandas.errors import ParserError
-from pandas.compat import StringIO, BytesIO, u
class PythonParserTests(object):
diff --git a/pandas/tests/io/parser/quoting.py b/pandas/tests/io/parser/quoting.py
index 013e635f80d21..27ee824cd2556 100644
--- a/pandas/tests/io/parser/quoting.py
+++ b/pandas/tests/io/parser/quoting.py
@@ -6,11 +6,11 @@
"""
import csv
-import pandas.util.testing as tm
+import pandas.util.testing as tm
from pandas import DataFrame
-from pandas.errors import ParserError
from pandas.compat import PY3, StringIO, u
+from pandas.errors import ParserError
class QuotingTests(object):
diff --git a/pandas/tests/io/parser/skiprows.py b/pandas/tests/io/parser/skiprows.py
index fb08ec0447267..89a7b5fd0ea1f 100644
--- a/pandas/tests/io/parser/skiprows.py
+++ b/pandas/tests/io/parser/skiprows.py
@@ -10,10 +10,9 @@
import numpy as np
import pandas.util.testing as tm
-
from pandas import DataFrame
+from pandas.compat import StringIO, lrange, range
from pandas.errors import EmptyDataError
-from pandas.compat import StringIO, range, lrange
class SkipRowsTests(object):
diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py
index bfe33980ac617..8b8bf3a339d85 100644
--- a/pandas/tests/io/parser/test_network.py
+++ b/pandas/tests/io/parser/test_network.py
@@ -6,14 +6,14 @@
"""
import logging
-import pytest
import numpy as np
+import pytest
-import pandas.util.testing as tm
import pandas.util._test_decorators as td
+import pandas.util.testing as tm
from pandas import DataFrame
-from pandas.io.parsers import read_csv
from pandas.compat import BytesIO, StringIO
+from pandas.io.parsers import read_csv
@pytest.mark.network
diff --git a/pandas/tests/io/parser/test_parsers.py b/pandas/tests/io/parser/test_parsers.py
index 8535a51657abf..00879c096aa50 100644
--- a/pandas/tests/io/parser/test_parsers.py
+++ b/pandas/tests/io/parser/test_parsers.py
@@ -1,31 +1,32 @@
# -*- coding: utf-8 -*-
import os
+
import pytest
-import pandas.util.testing as tm
-from pandas import read_csv, read_table, DataFrame
import pandas.core.common as com
+import pandas.util.testing as tm
+from pandas import DataFrame, read_csv, read_table
from pandas._libs.tslib import Timestamp
from pandas.compat import StringIO
-from .common import ParserTests
-from .header import HeaderTests
+from .c_parser_only import CParserTests
from .comment import CommentTests
+from .common import ParserTests
+from .compression import CompressionTests
+from .converters import ConverterTests
from .dialect import DialectTests
-from .quoting import QuotingTests
-from .usecols import UsecolsTests
-from .skiprows import SkipRowsTests
+from .dtypes import DtypeTests
+from .header import HeaderTests
from .index_col import IndexColTests
-from .na_values import NAvaluesTests
-from .converters import ConverterTests
-from .c_parser_only import CParserTests
-from .parse_dates import ParseDatesTests
-from .compression import CompressionTests
from .mangle_dupes import DupeColumnTests
from .multithread import MultithreadTests
+from .na_values import NAvaluesTests
+from .parse_dates import ParseDatesTests
from .python_parser_only import PythonParserTests
-from .dtypes import DtypeTests
+from .quoting import QuotingTests
+from .skiprows import SkipRowsTests
+from .usecols import UsecolsTests
class BaseParser(CommentTests, CompressionTests,
diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index a60f2b5a4c946..468640ae327bb 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -8,15 +8,14 @@
from datetime import datetime
-import pytest
import numpy as np
+import pytest
+
import pandas as pd
import pandas.util.testing as tm
-
-from pandas import DataFrame
-from pandas import compat
-from pandas.compat import StringIO, BytesIO
-from pandas.io.parsers import read_csv, read_fwf, EmptyDataError
+from pandas import DataFrame, compat
+from pandas.compat import BytesIO, StringIO
+from pandas.io.parsers import EmptyDataError, read_csv, read_fwf
class TestFwfParsing(object):
diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py
index c7026e3e0fc88..94c69bbbb8d5f 100644
--- a/pandas/tests/io/parser/test_textreader.py
+++ b/pandas/tests/io/parser/test_textreader.py
@@ -5,25 +5,20 @@
is integral to the C engine in parsers.py
"""
-import pytest
-
-from pandas.compat import StringIO, BytesIO, map
-from pandas import compat
-
import os
import sys
-from numpy import nan
import numpy as np
+import pytest
+from numpy import nan
-from pandas import DataFrame
-from pandas.io.parsers import (read_csv, TextFileReader)
-from pandas.util.testing import assert_frame_equal
-
+import pandas._libs.parsers as parser
import pandas.util.testing as tm
-
+from pandas import DataFrame, compat
from pandas._libs.parsers import TextReader
-import pandas._libs.parsers as parser
+from pandas.compat import BytesIO, StringIO, map
+from pandas.io.parsers import TextFileReader, read_csv
+from pandas.util.testing import assert_frame_equal
class TestTextReader(object):
diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py
index 1c64c1516077d..c6fd250116779 100644
--- a/pandas/tests/io/parser/test_unsupported.py
+++ b/pandas/tests/io/parser/test_unsupported.py
@@ -9,15 +9,14 @@
test suite as new feature support is added to the parsers.
"""
+import pytest
+
import pandas.io.parsers as parsers
import pandas.util.testing as tm
-
from pandas.compat import StringIO
from pandas.errors import ParserError
from pandas.io.parsers import read_csv
-import pytest
-
@pytest.fixture(params=["python", "python-fwf"], ids=lambda val: val)
def python_engine(request):
diff --git a/pandas/tests/io/parser/usecols.py b/pandas/tests/io/parser/usecols.py
index db01c20a56e9f..5fd2dc3f48e3e 100644
--- a/pandas/tests/io/parser/usecols.py
+++ b/pandas/tests/io/parser/usecols.py
@@ -5,11 +5,10 @@
for all of the parsers defined in parsers.py
"""
+import numpy as np
import pytest
-import numpy as np
import pandas.util.testing as tm
-
from pandas import DataFrame, Index
from pandas._libs.tslib import Timestamp
from pandas.compat import StringIO
diff --git a/setup.cfg b/setup.cfg
index a5006d66868f6..aa7a31cd1b386 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -388,28 +388,6 @@ skip=
pandas/tests/io/formats/test_css.py,
pandas/tests/io/formats/test_to_latex.py,
pandas/tests/io/formats/test_printing.py,
- pandas/tests/io/parser/skiprows.py,
- pandas/tests/io/parser/test_textreader.py,
- pandas/tests/io/parser/converters.py,
- pandas/tests/io/parser/na_values.py,
- pandas/tests/io/parser/comment.py,
- pandas/tests/io/parser/test_network.py,
- pandas/tests/io/parser/dtypes.py,
- pandas/tests/io/parser/parse_dates.py,
- pandas/tests/io/parser/quoting.py,
- pandas/tests/io/parser/multithread.py,
- pandas/tests/io/parser/index_col.py,
- pandas/tests/io/parser/test_read_fwf.py,
- pandas/tests/io/parser/test_unsupported.py,
- pandas/tests/io/parser/python_parser_only.py,
- pandas/tests/io/parser/test_parsers.py,
- pandas/tests/io/parser/c_parser_only.py,
- pandas/tests/io/parser/dialect.py,
- pandas/tests/io/parser/common.py,
- pandas/tests/io/parser/compression.py,
- pandas/tests/io/parser/usecols.py,
- pandas/tests/io/parser/mangle_dupes.py,
- pandas/tests/io/parser/header.py,
pandas/tests/io/msgpack/test_buffer.py,
pandas/tests/io/msgpack/test_read_size.py,
pandas/tests/io/msgpack/test_pack.py,
| - [x] partial #23334
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Ran `isort --recursive pandas/tests/io/parser` and then checked imports using `isort --recursive --check-only pandas/tests/io/parser`
This PR also exceeded 20 file limit (22 files with setup.cfg), but I think it's acceptable and unnecessary to divide it to two separate PRs.
| https://api.github.com/repos/pandas-dev/pandas/pulls/23367 | 2018-10-26T20:01:30Z | 2018-10-28T02:07:57Z | 2018-10-28T02:07:57Z | 2018-10-28T02:08:01Z |
STY: proposed isort settings [ci skip] [skip ci] [ciskip] [skipci] | diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py
index 08c961935a990..a63b3fb53625f 100644
--- a/pandas/core/arrays/sparse.py
+++ b/pandas/core/arrays/sparse.py
@@ -10,35 +10,35 @@
import numpy as np
-import pandas._libs.sparse as splib
-import pandas.core.algorithms as algos
-import pandas.core.common as com
-import pandas.io.formats.printing as printing
-from pandas import compat
from pandas._libs import index as libindex, lib
+import pandas._libs.sparse as splib
from pandas._libs.sparse import BlockIndex, IntIndex
from pandas._libs.tslibs import NaT
+import pandas.compat as compat
from pandas.compat.numpy import function as nv
-from pandas.core.accessor import PandasDelegate, delegate_names
-from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
-from pandas.core.base import PandasObject
+from pandas.errors import PerformanceWarning
+
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.cast import (
astype_nansafe, construct_1d_arraylike_from_scalar, find_common_type,
- infer_dtype_from_scalar, maybe_convert_platform
-)
+ infer_dtype_from_scalar, maybe_convert_platform)
from pandas.core.dtypes.common import (
is_array_like, is_bool_dtype, is_datetime64_any_dtype, is_dtype_equal,
is_integer, is_list_like, is_object_dtype, is_scalar, is_string_dtype,
- pandas_dtype
-)
+ pandas_dtype)
from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.generic import (
- ABCIndexClass, ABCSeries, ABCSparseSeries
-)
+ ABCIndexClass, ABCSeries, ABCSparseSeries)
from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna
+
+from pandas.core.accessor import PandasDelegate, delegate_names
+import pandas.core.algorithms as algos
+from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
+from pandas.core.base import PandasObject
+import pandas.core.common as com
from pandas.core.missing import interpolate_2d
-from pandas.errors import PerformanceWarning
+
+import pandas.io.formats.printing as printing
# ----------------------------------------------------------------------------
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index 9ef30b8fd021f..1bf97690a84ed 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -6,9 +6,10 @@
import types
-from pandas.core.dtypes.common import is_list_like, is_scalar
from pandas.util._decorators import make_signature
+from pandas.core.dtypes.common import is_list_like, is_scalar
+
class GroupByMixin(object):
""" provide the groupby facilities to the mixed object """
diff --git a/pandas/core/groupby/categorical.py b/pandas/core/groupby/categorical.py
index 3e653704bbace..85f51323a97b5 100644
--- a/pandas/core/groupby/categorical.py
+++ b/pandas/core/groupby/categorical.py
@@ -2,8 +2,7 @@
from pandas.core.algorithms import unique1d
from pandas.core.arrays.categorical import (
- Categorical, CategoricalDtype, _recode_for_categories
-)
+ Categorical, CategoricalDtype, _recode_for_categories)
def recode_for_groupby(c, sort, observed):
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index a832eecf87721..3ed80d266ce4d 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -8,39 +8,40 @@
import collections
import copy
-import warnings
from functools import partial
from textwrap import dedent
+import warnings
import numpy as np
-import pandas.core.algorithms as algorithms
-import pandas.core.common as com
-import pandas.core.indexes.base as ibase
-from pandas import compat
from pandas._libs import Timestamp, lib
+import pandas.compat as compat
from pandas.compat import lzip, map
from pandas.compat.numpy import _np_version_under1p13
-from pandas.core.arrays import Categorical
-from pandas.core.base import DataError, SpecificationError
+from pandas.util._decorators import Appender, Substitution
+
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.common import (
ensure_int64, ensure_platform_int, is_bool, is_datetimelike,
- is_integer_dtype, is_interval_dtype, is_numeric_dtype, is_scalar
-)
+ is_integer_dtype, is_interval_dtype, is_numeric_dtype, is_scalar)
from pandas.core.dtypes.missing import isna, notna
+
+import pandas.core.algorithms as algorithms
+from pandas.core.arrays import Categorical
+from pandas.core.base import DataError, SpecificationError
+import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.groupby import base
from pandas.core.groupby.groupby import (
- GroupBy, _apply_docs, _transform_template
-)
+ GroupBy, _apply_docs, _transform_template)
from pandas.core.index import CategoricalIndex, Index, MultiIndex
+import pandas.core.indexes.base as ibase
from pandas.core.internals import BlockManager, make_block
from pandas.core.panel import Panel
from pandas.core.series import Series
+
from pandas.plotting._core import boxplot_frame_groupby
-from pandas.util._decorators import Appender, Substitution
class NDFrameGroupBy(GroupBy):
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 5acccbf688e30..e31929434b5d6 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -8,35 +8,36 @@ class providing the base-class of operations.
"""
import collections
+from contextlib import contextmanager
import datetime
+from functools import partial, wraps
import types
import warnings
-from contextlib import contextmanager
-from functools import partial, wraps
import numpy as np
-import pandas.core.algorithms as algorithms
-import pandas.core.common as com
-from pandas import compat
from pandas._libs import Timestamp, groupby as libgroupby
+import pandas.compat as compat
from pandas.compat import callable, range, set_function_name, zip
from pandas.compat.numpy import function as nv
-from pandas.core.base import (
- DataError, GroupByError, PandasObject, SelectionMixin, SpecificationError
-)
-from pandas.core.config import option_context
+from pandas.util._decorators import Appender, Substitution, cache_readonly
+from pandas.util._validators import validate_kwargs
+
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.common import ensure_float, is_numeric_dtype, is_scalar
from pandas.core.dtypes.missing import isna, notna
+
+import pandas.core.algorithms as algorithms
+from pandas.core.base import (
+ DataError, GroupByError, PandasObject, SelectionMixin, SpecificationError)
+import pandas.core.common as com
+from pandas.core.config import option_context
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base
from pandas.core.index import Index, MultiIndex
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
-from pandas.util._decorators import Appender, Substitution, cache_readonly
-from pandas.util._validators import validate_kwargs
_doc_template = """
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index cbe87040b8117..b49bc5ee5950f 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -7,22 +7,24 @@
import numpy as np
-import pandas.core.algorithms as algorithms
-import pandas.core.common as com
-from pandas import compat
+import pandas.compat as compat
from pandas.compat import callable, zip
-from pandas.core.arrays import Categorical, ExtensionArray
+from pandas.util._decorators import cache_readonly
+
from pandas.core.dtypes.common import (
ensure_categorical, is_categorical_dtype, is_datetime64_dtype, is_hashable,
- is_list_like, is_scalar, is_timedelta64_dtype
-)
+ is_list_like, is_scalar, is_timedelta64_dtype)
from pandas.core.dtypes.generic import ABCSeries
+
+import pandas.core.algorithms as algorithms
+from pandas.core.arrays import Categorical, ExtensionArray
+import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.groupby.ops import BaseGrouper
from pandas.core.index import CategoricalIndex, Index, MultiIndex
from pandas.core.series import Series
+
from pandas.io.formats.printing import pprint_thing
-from pandas.util._decorators import cache_readonly
class Grouper(object):
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index af22744c4feec..390334a89cbfe 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -11,18 +11,20 @@
import numpy as np
-import pandas.core.algorithms as algorithms
-import pandas.core.common as com
from pandas._libs import NaT, groupby as libgroupby, iNaT, lib, reduction
from pandas.compat import lzip, range, zip
-from pandas.core.base import SelectionMixin
+from pandas.util._decorators import cache_readonly
+
from pandas.core.dtypes.common import (
ensure_float64, ensure_int64, ensure_int64_or_float64, ensure_object,
ensure_platform_int, is_bool_dtype, is_categorical_dtype, is_complex_dtype,
is_datetime64_any_dtype, is_integer_dtype, is_numeric_dtype,
- is_timedelta64_dtype, needs_i8_conversion
-)
+ is_timedelta64_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import _maybe_fill, isna
+
+import pandas.core.algorithms as algorithms
+from pandas.core.base import SelectionMixin
+import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base
@@ -30,9 +32,7 @@
from pandas.core.series import Series
from pandas.core.sorting import (
compress_group_index, decons_obs_group_ids, get_flattened_iterator,
- get_group_index, get_group_index_sorter, get_indexer_dict
-)
-from pandas.util._decorators import cache_readonly
+ get_group_index, get_group_index_sorter, get_indexer_dict)
def generate_bins_generic(values, binner, closed):
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 2b2f9ca51ce12..cce5fda7dba28 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -1,25 +1,26 @@
-import operator
-import warnings
from datetime import timedelta
+import operator
from sys import getsizeof
+import warnings
import numpy as np
-import pandas.core.common as com
-import pandas.core.indexes.base as ibase
-from pandas import compat
from pandas._libs import index as libindex
+import pandas.compat as compat
from pandas.compat import get_range_parameters, lrange, range
from pandas.compat.numpy import function as nv
-from pandas.core import ops
+from pandas.util._decorators import Appender, cache_readonly
+
from pandas.core.dtypes import concat as _concat
from pandas.core.dtypes.common import (
- is_int64_dtype, is_integer, is_scalar, is_timedelta64_dtype
-)
+ is_int64_dtype, is_integer, is_scalar, is_timedelta64_dtype)
from pandas.core.dtypes.generic import ABCSeries, ABCTimedeltaIndex
+
+from pandas.core import ops
+import pandas.core.common as com
+import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.indexes.numeric import Int64Index
-from pandas.util._decorators import Appender, cache_readonly
class RangeIndex(Int64Index):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 64d2e803dc8e1..6cc5acc4a61d0 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3,54 +3,47 @@
"""
from __future__ import division
-import warnings
from textwrap import dedent
+import warnings
import numpy as np
import numpy.ma as ma
-import pandas.core.algorithms as algorithms
-import pandas.core.common as com
-import pandas.core.indexes.base as ibase
-import pandas.core.nanops as nanops
-import pandas.core.ops as ops
-import pandas.io.formats.format as fmt
-import pandas.plotting._core as gfx
-from pandas import compat
from pandas._libs import iNaT, index as libindex, lib, tslibs
+import pandas.compat as compat
from pandas.compat import (
- PY36, OrderedDict, StringIO, get_range_parameters, range, u, zip
-)
+ PY36, OrderedDict, StringIO, get_range_parameters, range, u, zip)
from pandas.compat.numpy import function as nv
-from pandas.core import base, generic
-from pandas.core.accessor import CachedAccessor
-from pandas.core.arrays import ExtensionArray, SparseArray, period_array
-from pandas.core.arrays.categorical import Categorical, CategoricalAccessor
-from pandas.core.arrays.sparse import SparseAccessor
-from pandas.core.config import get_option
+from pandas.util._decorators import Appender, Substitution, deprecate
+from pandas.util._validators import validate_bool_kwarg
+
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar, construct_1d_ndarray_preserving_na,
construct_1d_object_array_from_listlike, infer_dtype_from_scalar,
maybe_cast_to_datetime, maybe_cast_to_integer_array, maybe_castable,
- maybe_convert_platform, maybe_upcast
-)
+ maybe_convert_platform, maybe_upcast)
from pandas.core.dtypes.common import (
_is_unorderable_exception, ensure_platform_int, is_bool,
is_categorical_dtype, is_datetime64tz_dtype, is_datetimelike, is_dict_like,
is_extension_array_dtype, is_extension_type, is_float_dtype, is_hashable,
is_integer, is_integer_dtype, is_iterator, is_list_like, is_object_dtype,
- is_scalar, is_string_like, is_timedelta64_dtype, pandas_dtype
-)
+ is_scalar, is_string_like, is_timedelta64_dtype, pandas_dtype)
from pandas.core.dtypes.generic import (
- ABCDataFrame, ABCIndexClass, ABCSeries, ABCSparseArray, ABCSparseSeries
-)
+ ABCDataFrame, ABCIndexClass, ABCSeries, ABCSparseArray, ABCSparseSeries)
from pandas.core.dtypes.missing import (
- isna, na_value_for_dtype, notna, remove_na_arraylike
-)
+ isna, na_value_for_dtype, notna, remove_na_arraylike)
+
+from pandas.core import algorithms, base, generic, nanops, ops
+from pandas.core.accessor import CachedAccessor
+from pandas.core.arrays import ExtensionArray, SparseArray, period_array
+from pandas.core.arrays.categorical import Categorical, CategoricalAccessor
+from pandas.core.arrays.sparse import SparseAccessor
+import pandas.core.common as com
+from pandas.core.config import get_option
from pandas.core.index import (
- Float64Index, Index, InvalidIndexError, MultiIndex, ensure_index
-)
+ Float64Index, Index, InvalidIndexError, MultiIndex, ensure_index)
from pandas.core.indexes.accessors import CombinedDatetimelikeProperties
+import pandas.core.indexes.base as ibase
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
@@ -58,9 +51,10 @@
from pandas.core.internals import SingleBlockManager
from pandas.core.strings import StringMethods
from pandas.core.tools.datetimes import to_datetime
+
+import pandas.io.formats.format as fmt
from pandas.io.formats.terminal import get_terminal_size
-from pandas.util._decorators import Appender, Substitution, deprecate
-from pandas.util._validators import validate_bool_kwarg
+import pandas.plotting._core as gfx
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py
index 287a03c2e5728..c6108f30a560a 100644
--- a/pandas/io/clipboards.py
+++ b/pandas/io/clipboards.py
@@ -1,10 +1,13 @@
""" io on the clipboard """
import warnings
-from pandas import compat, get_option, option_context
+import pandas.compat as compat
from pandas.compat import PY2, PY3, StringIO
+
from pandas.core.dtypes.generic import ABCDataFrame
+from pandas import get_option, option_context
+
def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover
r"""
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 40247c4aebd1e..155cf566b4c40 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -1,20 +1,21 @@
"""Common IO api utilities"""
import codecs
+from contextlib import closing, contextmanager
import csv
import mmap
import os
import zipfile
-from contextlib import closing, contextmanager
-import pandas.core.common as com
-from pandas import compat
+import pandas.compat as compat
from pandas.compat import BytesIO, StringIO, string_types, text_type
-from pandas.core.dtypes.common import is_file_like, is_number
-# compat
from pandas.errors import ( # noqa
- DtypeWarning, EmptyDataError, ParserError, ParserWarning
-)
+ DtypeWarning, EmptyDataError, ParserError, ParserWarning)
+
+from pandas.core.dtypes.common import is_file_like, is_number
+
+import pandas.core.common as com
+
from pandas.io.formats.printing import pprint_thing
# gh-12665: Alias for now and remove later.
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index c6a04b9bdee20..7a7b801f4ba4a 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -2,15 +2,15 @@
Module parse to/from Excel
"""
-import abc
-import os
-import warnings
# ---------------------------------------------------------------------
# ExcelFile class
+import abc
from datetime import MINYEAR, date, datetime, time, timedelta
from distutils.version import LooseVersion
from io import UnsupportedOperation
+import os
from textwrap import fill
+import warnings
import numpy as np
@@ -18,21 +18,21 @@
import pandas.compat as compat
from pandas.compat import (
OrderedDict, add_metaclass, lrange, map, range, reduce, string_types, u,
- zip
-)
-from pandas.core import config
+ zip)
+from pandas.errors import EmptyDataError
+from pandas.util._decorators import Appender, deprecate_kwarg
+
from pandas.core.dtypes.common import (
- is_bool, is_float, is_integer, is_list_like
-)
+ is_bool, is_float, is_integer, is_list_like)
+
+from pandas.core import config
from pandas.core.frame import DataFrame
-from pandas.errors import EmptyDataError
+
from pandas.io.common import (
_NA_VALUES, _is_url, _stringify_path, _urlopen, _validate_header_arg,
- get_filepath_or_buffer
-)
+ get_filepath_or_buffer)
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import TextParser
-from pandas.util._decorators import Appender, deprecate_kwarg
__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
index 436c16839ffc2..8d2715fe5beed 100644
--- a/pandas/io/feather_format.py
+++ b/pandas/io/feather_format.py
@@ -2,8 +2,10 @@
from distutils.version import LooseVersion
-from pandas import DataFrame, Int64Index, RangeIndex
from pandas.compat import range
+
+from pandas import DataFrame, Int64Index, RangeIndex
+
from pandas.io.common import _stringify_path
diff --git a/pandas/io/html.py b/pandas/io/html.py
index a1d3f4589ba35..4f887b69646ee 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -3,19 +3,22 @@
"""
+from distutils.version import LooseVersion
import numbers
import os
import re
-from distutils.version import LooseVersion
-import pandas.core.common as com
-from pandas import Series, compat
+import pandas.compat as compat
from pandas.compat import (
binary_type, iteritems, lmap, lrange, raise_with_traceback, string_types,
- u
-)
-from pandas.core.dtypes.common import is_list_like
+ u)
from pandas.errors import EmptyDataError
+
+from pandas.core.dtypes.common import is_list_like
+
+from pandas import Series
+import pandas.core.common as com
+
from pandas.io.common import _is_url, _validate_header_arg, urlopen
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import TextParser
diff --git a/pandas/io/packers.py b/pandas/io/packers.py
index 764e27a60abb5..ea673101e90b3 100644
--- a/pandas/io/packers.py
+++ b/pandas/io/packers.py
@@ -38,35 +38,36 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
-import os
-import warnings
from datetime import date, datetime, timedelta
+import os
from textwrap import dedent
+import warnings
-import numpy as np
from dateutil.parser import parse
+import numpy as np
+
+import pandas.compat as compat
+from pandas.compat import u, u_safe
+from pandas.errors import PerformanceWarning
+from pandas.util._move import (
+ BadMove as _BadMove, move_into_mutable_buffer as _move_into_mutable_buffer)
+
+from pandas.core.dtypes.common import (
+ is_categorical_dtype, is_object_dtype, needs_i8_conversion, pandas_dtype)
from pandas import ( # noqa:F401
Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Float64Index,
Index, Int64Index, Interval, IntervalIndex, MultiIndex, NaT, Panel, Period,
- PeriodIndex, RangeIndex, Series, TimedeltaIndex, Timestamp, compat
-)
-from pandas.compat import u, u_safe
+ PeriodIndex, RangeIndex, Series, TimedeltaIndex, Timestamp)
from pandas.core import internals
from pandas.core.arrays import IntervalArray, PeriodArray
from pandas.core.arrays.sparse import BlockIndex, IntIndex
-from pandas.core.dtypes.common import (
- is_categorical_dtype, is_object_dtype, needs_i8_conversion, pandas_dtype
-)
from pandas.core.generic import NDFrame
from pandas.core.internals import BlockManager, _safe_reshape, make_block
from pandas.core.sparse.api import SparseDataFrame, SparseSeries
-from pandas.errors import PerformanceWarning
+
from pandas.io.common import _stringify_path, get_filepath_or_buffer
from pandas.io.msgpack import ExtType, Packer as _Packer, Unpacker as _Unpacker
-from pandas.util._move import (
- BadMove as _BadMove, move_into_mutable_buffer as _move_into_mutable_buffer
-)
# check which compression libs we have installed
try:
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index aef1d84a19bc7..2c75f46385e86 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -3,9 +3,11 @@
from distutils.version import LooseVersion
from warnings import catch_warnings
-import pandas.core.common as com
-from pandas import DataFrame, Int64Index, RangeIndex, get_option
from pandas.compat import string_types
+
+from pandas import DataFrame, Int64Index, RangeIndex, get_option
+import pandas.core.common as com
+
from pandas.io.common import get_filepath_or_buffer, is_s3_url
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 8dd50fceb4efb..cd9d3ccb79af8 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -3,49 +3,48 @@
"""
from __future__ import print_function
+from collections import defaultdict
import csv
import datetime
import re
import sys
-import warnings
-from collections import defaultdict
from textwrap import fill
+import warnings
import numpy as np
import pandas._libs.lib as lib
import pandas._libs.ops as libops
import pandas._libs.parsers as parsers
-import pandas.core.common as com
-from pandas import compat
from pandas._libs.tslibs import parsing
+import pandas.compat as compat
from pandas.compat import (
- PY3, StringIO, lrange, lzip, map, range, string_types, u, zip
-)
-from pandas.core import algorithms
-from pandas.core.arrays import Categorical
+ PY3, StringIO, lrange, lzip, map, range, string_types, u, zip)
+from pandas.errors import EmptyDataError, ParserError, ParserWarning
+from pandas.util._decorators import Appender
+
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
ensure_object, is_categorical_dtype, is_dtype_equal, is_float, is_integer,
is_integer_dtype, is_list_like, is_object_dtype, is_scalar,
- is_string_dtype
-)
+ is_string_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import isna
+
+from pandas.core import algorithms
+from pandas.core.arrays import Categorical
+import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.index import (
- Index, MultiIndex, RangeIndex, ensure_index_from_sequences
-)
+ Index, MultiIndex, RangeIndex, ensure_index_from_sequences)
from pandas.core.series import Series
from pandas.core.tools import datetimes as tools
-from pandas.errors import EmptyDataError, ParserError, ParserWarning
+
from pandas.io.common import (
_NA_VALUES, BaseIterator, UnicodeReader, UTF8Recoder, _get_handle,
_infer_compression, _validate_header_arg, get_filepath_or_buffer,
- is_file_like
-)
+ is_file_like)
from pandas.io.date_converters import generic_parser
-from pandas.util._decorators import Appender
# BOM character (byte order mark)
# This exists at the beginning of a file to indicate endianness
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py
index 4e71462f4e331..c89d1df8ee64b 100644
--- a/pandas/io/pickle.py
+++ b/pandas/io/pickle.py
@@ -5,7 +5,9 @@
from numpy.lib.format import read_array, write_array
from pandas.compat import PY3, BytesIO, cPickle as pkl, pickle_compat as pc
+
from pandas.core.dtypes.common import _NS_DTYPE, is_datetime64_dtype
+
from pandas.io.common import _get_handle, _stringify_path
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 0830aece8c6f4..56b63fddd96ad 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -5,46 +5,45 @@
"""
import copy
+from datetime import date, datetime
+from distutils.version import LooseVersion
import itertools
import os
import re
import time
import warnings
-from datetime import date, datetime
-from distutils.version import LooseVersion
import numpy as np
-import pandas.core.common as com
-from pandas import (
- DataFrame, DatetimeIndex, Index, Int64Index, MultiIndex, Panel,
- PeriodIndex, Series, SparseDataFrame, SparseSeries, TimedeltaIndex, compat,
- concat, isna, to_datetime
-)
from pandas._libs import algos, lib, writers as libwriters
from pandas._libs.tslibs import timezones
from pandas.compat import PY3, filter, lrange, range, string_types
+from pandas.errors import PerformanceWarning
+
+from pandas.core.dtypes.common import (
+ ensure_int64, ensure_object, ensure_platform_int, is_categorical_dtype,
+ is_datetime64_dtype, is_datetime64tz_dtype, is_list_like,
+ is_timedelta64_dtype)
+from pandas.core.dtypes.missing import array_equivalent
+
+from pandas import (
+ DataFrame, DatetimeIndex, Index, Int64Index, MultiIndex, Panel,
+ PeriodIndex, Series, SparseDataFrame, SparseSeries, TimedeltaIndex, compat,
+ concat, isna, to_datetime)
from pandas.core import config
from pandas.core.algorithms import match, unique
from pandas.core.arrays.categorical import (
- Categorical, _factorize_from_iterables
-)
+ Categorical, _factorize_from_iterables)
from pandas.core.arrays.sparse import BlockIndex, IntIndex
from pandas.core.base import StringMixin
+import pandas.core.common as com
from pandas.core.computation.pytables import Expr, maybe_expression
from pandas.core.config import get_option
-from pandas.core.dtypes.common import (
- ensure_int64, ensure_object, ensure_platform_int, is_categorical_dtype,
- is_datetime64_dtype, is_datetime64tz_dtype, is_list_like,
- is_timedelta64_dtype
-)
-from pandas.core.dtypes.missing import array_equivalent
from pandas.core.index import ensure_index
from pandas.core.internals import (
BlockManager, _block2d_to_blocknd, _block_shape, _factor_indexer,
- make_block
-)
-from pandas.errors import PerformanceWarning
+ make_block)
+
from pandas.io.common import _stringify_path
from pandas.io.formats.printing import adjoin, pprint_thing
diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py
index c326580487b09..6a38e3d2eb783 100644
--- a/pandas/io/sas/sas_xport.py
+++ b/pandas/io/sas/sas_xport.py
@@ -8,16 +8,18 @@
https://support.sas.com/techsup/technote/ts140.pdf
"""
+from datetime import datetime
import struct
import warnings
-from datetime import datetime
import numpy as np
+from pandas.util._decorators import Appender
+
import pandas as pd
from pandas import compat
+
from pandas.io.common import BaseIterator, get_filepath_or_buffer
-from pandas.util._decorators import Appender
_correct_line1 = ("HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!"
"000000000000000000000000000000 ")
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index d72996a8e6157..2da3775d5a6a7 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -2,6 +2,7 @@
Read SAS sas7bdat or xport files.
"""
from pandas import compat
+
from pandas.io.common import _stringify_path
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 3876792d6226f..00fbc35ed1e7d 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -6,24 +6,24 @@
from __future__ import division, print_function
-import re
-import warnings
from contextlib import contextmanager
from datetime import date, datetime, time
+import re
+import warnings
import numpy as np
import pandas._libs.lib as lib
from pandas.compat import (
- map, raise_with_traceback, string_types, text_type, zip
-)
-from pandas.core.api import DataFrame, Series
-from pandas.core.base import PandasObject
+ map, raise_with_traceback, string_types, text_type, zip)
+
from pandas.core.dtypes.common import (
- is_datetime64tz_dtype, is_dict_like, is_list_like
-)
+ is_datetime64tz_dtype, is_dict_like, is_list_like)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import isna
+
+from pandas.core.api import DataFrame, Series
+from pandas.core.base import PandasObject
from pandas.core.tools.datetimes import to_datetime
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 00d1a92587930..df0d47b063411 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -10,33 +10,33 @@
http://www.statsmodels.org/devel/
"""
+from collections import OrderedDict
import datetime
import struct
import sys
import warnings
-from collections import OrderedDict
-import numpy as np
from dateutil.relativedelta import relativedelta
+import numpy as np
-from pandas import DatetimeIndex, compat, isna, to_datetime, to_timedelta
from pandas._libs.lib import infer_dtype
from pandas._libs.tslibs import NaT, Timestamp
from pandas._libs.writers import max_len_string_array
from pandas.compat import (
- BytesIO, lmap, lrange, lzip, range, string_types, text_type, zip
-)
+ BytesIO, lmap, lrange, lzip, range, string_types, text_type, zip)
+from pandas.util._decorators import Appender, deprecate_kwarg
+
+from pandas.core.dtypes.common import (
+ ensure_object, is_categorical_dtype, is_datetime64_dtype)
+
+from pandas import DatetimeIndex, compat, isna, to_datetime, to_timedelta
from pandas.core.arrays import Categorical
from pandas.core.base import StringMixin
-from pandas.core.dtypes.common import (
- ensure_object, is_categorical_dtype, is_datetime64_dtype
-)
from pandas.core.frame import DataFrame
from pandas.core.series import Series
+
from pandas.io.common import (
- BaseIterator, _stringify_path, get_filepath_or_buffer
-)
-from pandas.util._decorators import Appender, deprecate_kwarg
+ BaseIterator, _stringify_path, get_filepath_or_buffer)
_version_error = ("Version of given Stata file is not 104, 105, 108, "
"111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), "
diff --git a/pandas/tests/arrays/categorical/test_analytics.py b/pandas/tests/arrays/categorical/test_analytics.py
index 18cce6573817c..0f292a457bbc2 100644
--- a/pandas/tests/arrays/categorical/test_analytics.py
+++ b/pandas/tests/arrays/categorical/test_analytics.py
@@ -5,10 +5,11 @@
import numpy as np
import pytest
-import pandas.util.testing as tm
-from pandas import Categorical, Index, Series
from pandas.compat import PYPY
+from pandas import Categorical, Index, Series
+import pandas.util.testing as tm
+
class TestCategoricalAnalytics(object):
diff --git a/pandas/tests/arrays/categorical/test_api.py b/pandas/tests/arrays/categorical/test_api.py
index 1deef4762be26..54de398473d52 100644
--- a/pandas/tests/arrays/categorical/test_api.py
+++ b/pandas/tests/arrays/categorical/test_api.py
@@ -3,10 +3,10 @@
import numpy as np
import pytest
-import pandas.util.testing as tm
from pandas import Categorical, CategoricalIndex, DataFrame, Index, Series
from pandas.core.arrays.categorical import _recode_for_categories
from pandas.tests.arrays.categorical.common import TestCategorical
+import pandas.util.testing as tm
class TestCategoricalAPI(object):
diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index b53fd0aa80344..8bd245d2aabae 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -5,15 +5,15 @@
import numpy as np
import pytest
+from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype
+from pandas.core.dtypes.dtypes import CategoricalDtype
+
import pandas as pd
-import pandas.util.testing as tm
from pandas import (
Categorical, CategoricalIndex, DatetimeIndex, Index, Interval,
IntervalIndex, NaT, Series, Timestamp, date_range, period_range,
- timedelta_range
-)
-from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype
-from pandas.core.dtypes.dtypes import CategoricalDtype
+ timedelta_range)
+import pandas.util.testing as tm
class TestCategoricalConstructors(object):
diff --git a/pandas/tests/arrays/categorical/test_dtypes.py b/pandas/tests/arrays/categorical/test_dtypes.py
index 7d4824fe7d18a..491a7867fee71 100644
--- a/pandas/tests/arrays/categorical/test_dtypes.py
+++ b/pandas/tests/arrays/categorical/test_dtypes.py
@@ -2,11 +2,13 @@
import numpy as np
import pytest
-import pandas.util.testing as tm
-from pandas import Categorical, CategoricalIndex, Index, Series, Timestamp
from pandas.compat import long
+
from pandas.core.dtypes.dtypes import CategoricalDtype
+from pandas import Categorical, CategoricalIndex, Index, Series, Timestamp
+import pandas.util.testing as tm
+
class TestCategoricalDtypes(object):
diff --git a/pandas/tests/arrays/categorical/test_indexing.py b/pandas/tests/arrays/categorical/test_indexing.py
index d74a7d5d2ed5d..a54ee7381f9eb 100644
--- a/pandas/tests/arrays/categorical/test_indexing.py
+++ b/pandas/tests/arrays/categorical/test_indexing.py
@@ -3,10 +3,10 @@
import numpy as np
import pytest
-import pandas.core.common as com
-import pandas.util.testing as tm
from pandas import Categorical, CategoricalIndex, Index, PeriodIndex, Series
+import pandas.core.common as com
from pandas.tests.arrays.categorical.common import TestCategorical
+import pandas.util.testing as tm
class TestCategoricalIndexingWithFactor(TestCategorical):
diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py
index a4b39846cbfaf..ce15ebfb281f2 100644
--- a/pandas/tests/arrays/categorical/test_operators.py
+++ b/pandas/tests/arrays/categorical/test_operators.py
@@ -4,9 +4,9 @@
import pytest
import pandas as pd
-import pandas.util.testing as tm
from pandas import Categorical, DataFrame, Series, date_range
from pandas.tests.arrays.categorical.common import TestCategorical
+import pandas.util.testing as tm
class TestCategoricalOpsWithFactor(TestCategorical):
diff --git a/pandas/tests/arrays/categorical/test_repr.py b/pandas/tests/arrays/categorical/test_repr.py
index 3c830ee6f6da5..5f71d0148ee88 100644
--- a/pandas/tests/arrays/categorical/test_repr.py
+++ b/pandas/tests/arrays/categorical/test_repr.py
@@ -2,11 +2,11 @@
import numpy as np
+from pandas.compat import PY3, u
+
from pandas import (
Categorical, CategoricalIndex, Series, date_range, period_range,
- timedelta_range
-)
-from pandas.compat import PY3, u
+ timedelta_range)
from pandas.core.config import option_context
from pandas.tests.arrays.categorical.common import TestCategorical
diff --git a/pandas/tests/arrays/categorical/test_subclass.py b/pandas/tests/arrays/categorical/test_subclass.py
index 08ebb7d1b05f7..7e90f8d51a3ef 100644
--- a/pandas/tests/arrays/categorical/test_subclass.py
+++ b/pandas/tests/arrays/categorical/test_subclass.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
-import pandas.util.testing as tm
from pandas import Categorical
+import pandas.util.testing as tm
class TestCategoricalSubclassing(object):
diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py
index ff69b68f1117c..9a191dda3a73a 100644
--- a/pandas/tests/arrays/interval/test_interval.py
+++ b/pandas/tests/arrays/interval/test_interval.py
@@ -2,9 +2,9 @@
import numpy as np
import pytest
-import pandas.util.testing as tm
from pandas import Index, IntervalIndex, date_range, timedelta_range
from pandas.core.arrays import IntervalArray
+import pandas.util.testing as tm
@pytest.fixture(params=[
diff --git a/pandas/tests/arrays/interval/test_ops.py b/pandas/tests/arrays/interval/test_ops.py
index 7000ff0f0c3f6..45bf465577ace 100644
--- a/pandas/tests/arrays/interval/test_ops.py
+++ b/pandas/tests/arrays/interval/test_ops.py
@@ -2,9 +2,9 @@
import numpy as np
import pytest
-import pandas.util.testing as tm
from pandas import Interval, IntervalIndex, Timedelta, Timestamp
from pandas.core.arrays import IntervalArray
+import pandas.util.testing as tm
@pytest.fixture(params=[IntervalArray, IntervalIndex])
diff --git a/pandas/tests/arrays/sparse/test_arithmetics.py b/pandas/tests/arrays/sparse/test_arithmetics.py
index 2aa24fff3d1d7..42a29654b44d5 100644
--- a/pandas/tests/arrays/sparse/test_arithmetics.py
+++ b/pandas/tests/arrays/sparse/test_arithmetics.py
@@ -4,8 +4,8 @@
import pytest
import pandas as pd
-import pandas.util.testing as tm
from pandas.core.sparse.api import SparseDtype
+import pandas.util.testing as tm
class TestSparseArrayArithmetics(object):
diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py
index 45c23735a986b..852c4fb910560 100644
--- a/pandas/tests/arrays/sparse/test_array.py
+++ b/pandas/tests/arrays/sparse/test_array.py
@@ -3,15 +3,16 @@
import warnings
import numpy as np
-import pytest
from numpy import nan
+import pytest
-import pandas as pd
-import pandas.util._test_decorators as td
-import pandas.util.testing as tm
from pandas._libs.sparse import IntIndex
from pandas.compat import range
+import pandas.util._test_decorators as td
+
+import pandas as pd
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
+import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
diff --git a/pandas/tests/arrays/sparse/test_dtype.py b/pandas/tests/arrays/sparse/test_dtype.py
index d3e2116882739..75fc325b07a08 100644
--- a/pandas/tests/arrays/sparse/test_dtype.py
+++ b/pandas/tests/arrays/sparse/test_dtype.py
@@ -2,8 +2,8 @@
import pytest
import pandas as pd
-import pandas.util.testing as tm
from pandas.core.sparse.api import SparseDtype
+import pandas.util.testing as tm
@pytest.mark.parametrize("dtype, fill_value", [
diff --git a/pandas/tests/arrays/sparse/test_libsparse.py b/pandas/tests/arrays/sparse/test_libsparse.py
index e6ebd92c6ffdc..cbad7e8e9136d 100644
--- a/pandas/tests/arrays/sparse/test_libsparse.py
+++ b/pandas/tests/arrays/sparse/test_libsparse.py
@@ -5,9 +5,10 @@
import pandas._libs.sparse as splib
import pandas.util._test_decorators as td
-import pandas.util.testing as tm
+
from pandas import Series
from pandas.core.arrays.sparse import BlockIndex, IntIndex, _make_index
+import pandas.util.testing as tm
TEST_LENGTH = 20
diff --git a/pandas/tests/arrays/test_period.py b/pandas/tests/arrays/test_period.py
index dcbb0d4048b0f..40bdd4f1aaa5c 100644
--- a/pandas/tests/arrays/test_period.py
+++ b/pandas/tests/arrays/test_period.py
@@ -1,14 +1,16 @@
import numpy as np
import pytest
-import pandas as pd
-import pandas.util.testing as tm
from pandas._libs.tslibs import iNaT
from pandas._libs.tslibs.period import IncompatibleFrequency
-from pandas.core.arrays import PeriodArray, period_array
+
from pandas.core.dtypes.common import pandas_dtype
from pandas.core.dtypes.dtypes import PeriodDtype
+import pandas as pd
+from pandas.core.arrays import PeriodArray, period_array
+import pandas.util.testing as tm
+
# ----------------------------------------------------------------------------
# Constructors
diff --git a/pandas/tests/extension/arrow/bool.py b/pandas/tests/extension/arrow/bool.py
index e6f997b01aad2..f8e357e162232 100644
--- a/pandas/tests/extension/arrow/bool.py
+++ b/pandas/tests/extension/arrow/bool.py
@@ -13,8 +13,7 @@
import pandas as pd
from pandas.api.extensions import (
- ExtensionArray, ExtensionDtype, register_extension_dtype, take
-)
+ ExtensionArray, ExtensionDtype, register_extension_dtype, take)
@register_extension_dtype
diff --git a/pandas/tests/extension/base/constructors.py b/pandas/tests/extension/base/constructors.py
index fdd2b99d9b3c7..076be53a4a72f 100644
--- a/pandas/tests/extension/base/constructors.py
+++ b/pandas/tests/extension/base/constructors.py
@@ -1,8 +1,8 @@
import pytest
import pandas as pd
-import pandas.util.testing as tm
from pandas.core.internals import ExtensionBlock
+import pandas.util.testing as tm
from .base import BaseExtensionTests
diff --git a/pandas/tests/extension/base/interface.py b/pandas/tests/extension/base/interface.py
index 91b1b87a9d7ea..00a480d311b58 100644
--- a/pandas/tests/extension/base/interface.py
+++ b/pandas/tests/extension/base/interface.py
@@ -1,10 +1,12 @@
import numpy as np
-import pandas as pd
from pandas.compat import StringIO
+
from pandas.core.dtypes.common import is_extension_array_dtype
from pandas.core.dtypes.dtypes import ExtensionDtype
+import pandas as pd
+
from .base import BaseExtensionTests
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index e47b8049ae65c..c14bfa359bc64 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -5,9 +5,10 @@
import numpy as np
+from pandas.core.dtypes.base import ExtensionDtype
+
import pandas as pd
from pandas.core.arrays import ExtensionArray, ExtensionScalarOpsMixin
-from pandas.core.dtypes.base import ExtensionDtype
class DecimalDtype(ExtensionDtype):
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 59e77c3f0f3f3..1c9beefe9e542 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -5,9 +5,9 @@
import pytest
import pandas as pd
-import pandas.util.testing as tm
from pandas import compat
from pandas.tests.extension import base
+import pandas.util.testing as tm
from .array import DecimalArray, DecimalDtype, make_data, to_decimal
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 75b0f6b02edad..2c6e74fda8a0e 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -19,9 +19,10 @@
import numpy as np
+from pandas.core.dtypes.base import ExtensionDtype
+
from pandas import compat
from pandas.core.arrays import ExtensionArray
-from pandas.core.dtypes.base import ExtensionDtype
class JSONDtype(ExtensionDtype):
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index d6528e3085527..778432376e092 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -3,10 +3,11 @@
import pytest
-import pandas as pd
-import pandas.util.testing as tm
from pandas.compat import PY2, PY36
+
+import pandas as pd
from pandas.tests.extension import base
+import pandas.util.testing as tm
from .array import JSONArray, JSONDtype, make_data
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index b5ded1890ae64..65e6a15dd8df0 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -19,10 +19,10 @@
import pytest
import pandas as pd
-import pandas.util.testing as tm
from pandas import Categorical
from pandas.api.types import CategoricalDtype
from pandas.tests.extension import base
+import pandas.util.testing as tm
def make_data():
diff --git a/pandas/tests/extension/test_common.py b/pandas/tests/extension/test_common.py
index 8a026a0c38283..2bc4bf5df2298 100644
--- a/pandas/tests/extension/test_common.py
+++ b/pandas/tests/extension/test_common.py
@@ -1,12 +1,13 @@
import numpy as np
import pytest
-import pandas as pd
-import pandas.util.testing as tm
-from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes import dtypes
from pandas.core.dtypes.common import is_extension_array_dtype
+import pandas as pd
+from pandas.core.arrays import ExtensionArray
+import pandas.util.testing as tm
+
class DummyDtype(dtypes.ExtensionDtype):
pass
diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py
index 680cf83c64b9e..efee647389884 100644
--- a/pandas/tests/extension/test_integer.py
+++ b/pandas/tests/extension/test_integer.py
@@ -16,13 +16,13 @@
import numpy as np
import pytest
+from pandas.core.dtypes.common import is_extension_array_dtype
+
import pandas as pd
from pandas.core.arrays import integer_array
from pandas.core.arrays.integer import (
Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, UInt8Dtype, UInt16Dtype,
- UInt32Dtype, UInt64Dtype
-)
-from pandas.core.dtypes.common import is_extension_array_dtype
+ UInt32Dtype, UInt64Dtype)
from pandas.tests.extension import base
diff --git a/pandas/tests/extension/test_interval.py b/pandas/tests/extension/test_interval.py
index 3ffb88ef302e9..3154f34434ce2 100644
--- a/pandas/tests/extension/test_interval.py
+++ b/pandas/tests/extension/test_interval.py
@@ -16,11 +16,12 @@
import numpy as np
import pytest
-import pandas.util.testing as tm
+from pandas.core.dtypes.dtypes import IntervalDtype
+
from pandas import Interval
from pandas.core.arrays import IntervalArray
-from pandas.core.dtypes.dtypes import IntervalDtype
from pandas.tests.extension import base
+import pandas.util.testing as tm
def make_data():
diff --git a/pandas/tests/extension/test_period.py b/pandas/tests/extension/test_period.py
index 6f59cbb66a145..83f30aed88e65 100644
--- a/pandas/tests/extension/test_period.py
+++ b/pandas/tests/extension/test_period.py
@@ -1,12 +1,14 @@
import numpy as np
import pytest
-import pandas as pd
-import pandas.util.testing as tm
from pandas._libs.tslib import iNaT
-from pandas.core.arrays import PeriodArray
+
from pandas.core.dtypes.dtypes import PeriodDtype
+
+import pandas as pd
+from pandas.core.arrays import PeriodArray
from pandas.tests.extension import base
+import pandas.util.testing as tm
@pytest.fixture
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index bec969020559c..468b1610a9142 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -3,19 +3,21 @@
import numpy as np
import pytest
-import pandas as pd
-import pandas.util.testing as tm
-from pandas import (
- CategoricalIndex, DatetimeIndex, Float64Index, Index, Int64Index,
- IntervalIndex, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex,
- UInt64Index, compat, isna
-)
from pandas._libs.tslib import iNaT
+import pandas.compat as compat
from pandas.compat import PY3
+
from pandas.core.dtypes.common import needs_i8_conversion
from pandas.core.dtypes.dtypes import CategoricalDtype
+
+import pandas as pd
+from pandas import (
+ CategoricalIndex, DatetimeIndex, Float64Index, Index, Int64Index,
+ IntervalIndex, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex,
+ UInt64Index, isna)
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
+import pandas.util.testing as tm
class Base(object):
diff --git a/pandas/tests/indexes/conftest.py b/pandas/tests/indexes/conftest.py
index 4a2a4a7deb5cc..8cfed33a96ac5 100644
--- a/pandas/tests/indexes/conftest.py
+++ b/pandas/tests/indexes/conftest.py
@@ -1,10 +1,11 @@
import numpy as np
import pytest
-import pandas as pd
-import pandas.util.testing as tm
from pandas.compat import long, lzip
+
+import pandas as pd
from pandas.core.indexes.api import Index, MultiIndex
+import pandas.util.testing as tm
@pytest.fixture(params=[tm.makeUnicodeIndex(100),
diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py
index 038c4f786f69f..de51120baeb58 100644
--- a/pandas/tests/indexes/datetimes/test_arithmetic.py
+++ b/pandas/tests/indexes/datetimes/test_arithmetic.py
@@ -4,10 +4,11 @@
import pytest
import pytz
+from pandas.errors import NullFrequencyError
+
import pandas as pd
-import pandas.util.testing as tm
from pandas import DatetimeIndex, Series, date_range
-from pandas.errors import NullFrequencyError
+import pandas.util.testing as tm
class TestDatetimeIndexArithmetic(object):
diff --git a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py
index 8895624d74e89..a9cfc551e073b 100644
--- a/pandas/tests/indexes/datetimes/test_astype.py
+++ b/pandas/tests/indexes/datetimes/test_astype.py
@@ -1,17 +1,16 @@
from datetime import datetime
import dateutil
+from dateutil.tz import tzlocal
import numpy as np
import pytest
import pytz
-from dateutil.tz import tzlocal
import pandas as pd
-import pandas.util.testing as tm
from pandas import (
DatetimeIndex, Index, Int64Index, NaT, Period, Series, Timestamp,
- date_range
-)
+ date_range)
+import pandas.util.testing as tm
class TestDatetimeIndex(object):
diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py
index 2fc3e29a47b20..7a251a8ecfb28 100644
--- a/pandas/tests/indexes/datetimes/test_construction.py
+++ b/pandas/tests/indexes/datetimes/test_construction.py
@@ -6,14 +6,14 @@
import pytest
import pytz
+from pandas._libs.tslib import OutOfBoundsDatetime
+from pandas._libs.tslibs import conversion
+
import pandas as pd
-import pandas.util.testing as tm
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
- to_datetime
-)
-from pandas._libs.tslib import OutOfBoundsDatetime
-from pandas._libs.tslibs import conversion
+ to_datetime)
+import pandas.util.testing as tm
class TestDatetimeIndex(object):
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index 450d7643bfbd5..b6bab272c8c0a 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -9,17 +9,17 @@
import pytz
from pytz import timezone
-import pandas as pd
-import pandas.util._test_decorators as td
-import pandas.util.testing as tm
-from pandas import (
- DatetimeIndex, Timestamp, bdate_range, compat, date_range, offsets
-)
+import pandas.compat as compat
from pandas.errors import OutOfBoundsDatetime
+import pandas.util._test_decorators as td
+
+import pandas as pd
+from pandas import DatetimeIndex, Timestamp, bdate_range, date_range, offsets
from pandas.tests.series.common import TestData
+import pandas.util.testing as tm
+
from pandas.tseries.offsets import (
- BDay, CDay, DateOffset, MonthEnd, generate_range, prefix_mapping
-)
+ BDay, CDay, DateOffset, MonthEnd, generate_range, prefix_mapping)
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index 84214b331e8af..cea56bf803083 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -1,16 +1,16 @@
-import sys
from datetime import date
+import sys
import dateutil
import numpy as np
import pytest
+from pandas.compat import lrange
+
import pandas as pd
-import pandas.util.testing as tm
from pandas import (
- DataFrame, DatetimeIndex, Index, Timestamp, date_range, offsets
-)
-from pandas.compat import lrange
+ DataFrame, DatetimeIndex, Index, Timestamp, date_range, offsets)
+import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
randn = np.random.randn
diff --git a/pandas/tests/indexes/datetimes/test_formats.py b/pandas/tests/indexes/datetimes/test_formats.py
index 4279c582e9777..d8e4104919e55 100644
--- a/pandas/tests/indexes/datetimes/test_formats.py
+++ b/pandas/tests/indexes/datetimes/test_formats.py
@@ -6,8 +6,8 @@
import pytz
import pandas as pd
-import pandas.util.testing as tm
from pandas import DatetimeIndex, Series
+import pandas.util.testing as tm
def test_to_native_types():
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index cb2b0dfe5d11c..b66475612fe40 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -4,10 +4,12 @@
import pytest
import pytz
-import pandas as pd
import pandas.compat as compat
-import pandas.util.testing as tm
+
+import pandas as pd
from pandas import DatetimeIndex, Index, Timestamp, date_range, notna
+import pandas.util.testing as tm
+
from pandas.tseries.offsets import BDay, CDay
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py
index 00d816459626b..6d6f13bb763f6 100644
--- a/pandas/tests/indexes/datetimes/test_misc.py
+++ b/pandas/tests/indexes/datetimes/test_misc.py
@@ -6,10 +6,9 @@
import pytest
import pandas as pd
-import pandas.util.testing as tm
from pandas import (
- DatetimeIndex, Index, Timestamp, compat, date_range, datetime, offsets
-)
+ DatetimeIndex, Index, Timestamp, compat, date_range, datetime, offsets)
+import pandas.util.testing as tm
class TestTimeSeries(object):
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index 086c687148292..2cb7482cda617 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -1,17 +1,18 @@
-import warnings
from datetime import datetime
+import warnings
import numpy as np
import pytest
+from pandas.core.dtypes.generic import ABCDateOffset
+
import pandas as pd
-import pandas.util.testing as tm
from pandas import (
DatetimeIndex, Index, PeriodIndex, Series, Timestamp, bdate_range,
- date_range
-)
-from pandas.core.dtypes.generic import ABCDateOffset
+ date_range)
from pandas.tests.test_base import Ops
+import pandas.util.testing as tm
+
from pandas.tseries.offsets import BDay, BMonthEnd, CDay, Day, Hour
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py
index ae50ccedd7917..27e53c15238be 100644
--- a/pandas/tests/indexes/datetimes/test_partial_slicing.py
+++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py
@@ -1,15 +1,14 @@
""" test partial slicing on Series/Frame """
-import operator as op
from datetime import datetime
+import operator as op
import numpy as np
import pytest
import pandas as pd
from pandas import (
- DataFrame, DatetimeIndex, Index, Series, Timedelta, Timestamp, date_range
-)
+ DataFrame, DatetimeIndex, Index, Series, Timedelta, Timestamp, date_range)
from pandas.core.indexing import IndexingError
from pandas.util import testing as tm
diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py
index dac2df520c8e4..b644cb5844d9b 100644
--- a/pandas/tests/indexes/datetimes/test_scalar_compat.py
+++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py
@@ -8,8 +8,9 @@
import pytest
import pandas as pd
-import pandas.util.testing as tm
from pandas import DatetimeIndex, Timestamp, date_range
+import pandas.util.testing as tm
+
from pandas.tseries.frequencies import to_offset
diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py
index 896d1cfd0094c..d72bf275463ac 100644
--- a/pandas/tests/indexes/datetimes/test_setops.py
+++ b/pandas/tests/indexes/datetimes/test_setops.py
@@ -3,13 +3,14 @@
import numpy as np
import pytest
-import pandas as pd
import pandas.util._test_decorators as td
-import pandas.util.testing as tm
+
+import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, Index, Int64Index, Series, bdate_range,
- date_range, to_datetime
-)
+ date_range, to_datetime)
+import pandas.util.testing as tm
+
from pandas.tseries.offsets import BMonthEnd, Minute, MonthEnd
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py
index 6309206f48b1a..9ad540b174438 100644
--- a/pandas/tests/indexes/datetimes/test_timezones.py
+++ b/pandas/tests/indexes/datetimes/test_timezones.py
@@ -6,20 +6,20 @@
from distutils.version import LooseVersion
import dateutil
+from dateutil.tz import gettz, tzlocal
import numpy as np
import pytest
import pytz
-from dateutil.tz import gettz, tzlocal
-import pandas as pd
+from pandas._libs.tslibs import conversion, timezones
+from pandas.compat import PY3, lrange, zip
import pandas.util._test_decorators as td
-import pandas.util.testing as tm
+
+import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, bdate_range, date_range, isna,
- to_datetime
-)
-from pandas._libs.tslibs import conversion, timezones
-from pandas.compat import PY3, lrange, zip
+ to_datetime)
+import pandas.util.testing as tm
class FixedOffset(tzinfo):
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index 428888a0e366d..ba18f9b34574d 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -1,29 +1,30 @@
""" test to_datetime """
import calendar
-import locale
from datetime import datetime, time
from distutils.version import LooseVersion
+import locale
import dateutil
+from dateutil.parser import parse
+from dateutil.tz.tz import tzoffset
import numpy as np
import pytest
import pytz
-from dateutil.parser import parse
-from dateutil.tz.tz import tzoffset
-import pandas as pd
-import pandas.util._test_decorators as td
-from pandas import (
- DataFrame, DatetimeIndex, Index, NaT, Series, Timestamp, compat,
- date_range, isna, to_datetime
-)
from pandas._libs import tslib
from pandas._libs.tslibs import parsing
from pandas.compat import PY3, lmap
+from pandas.errors import OutOfBoundsDatetime
+import pandas.util._test_decorators as td
+
from pandas.core.dtypes.common import is_datetime64_ns_dtype
+
+import pandas as pd
+from pandas import (
+ DataFrame, DatetimeIndex, Index, NaT, Series, Timestamp, compat,
+ date_range, isna, to_datetime)
from pandas.core.tools import datetimes as tools
-from pandas.errors import OutOfBoundsDatetime
from pandas.util import testing as tm
from pandas.util.testing import assert_series_equal
diff --git a/pandas/tests/indexes/multi/test_compat.py b/pandas/tests/indexes/multi/test_compat.py
index ef3f7ddbbf81d..f05b53522fa31 100644
--- a/pandas/tests/indexes/multi/test_compat.py
+++ b/pandas/tests/indexes/multi/test_compat.py
@@ -4,10 +4,11 @@
import numpy as np
import pytest
-import pandas.util.testing as tm
-from pandas import MultiIndex
from pandas.compat import PY3, long
+from pandas import MultiIndex
+import pandas.util.testing as tm
+
def test_numeric_compat(idx):
tm.assert_raises_regex(TypeError, "cannot perform __mul__",
diff --git a/pandas/tests/indexes/multi/test_constructor.py b/pandas/tests/indexes/multi/test_constructor.py
index ca9b2766f3798..833de283e5367 100644
--- a/pandas/tests/indexes/multi/test_constructor.py
+++ b/pandas/tests/indexes/multi/test_constructor.py
@@ -5,13 +5,15 @@
import numpy as np
import pytest
-import pandas as pd
-import pandas.util.testing as tm
-from pandas import Index, MultiIndex, date_range
from pandas._libs.tslib import Timestamp
from pandas.compat import lrange, range
+
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
+import pandas as pd
+from pandas import Index, MultiIndex, date_range
+import pandas.util.testing as tm
+
def test_constructor_single_level():
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
diff --git a/pandas/tests/indexes/multi/test_contains.py b/pandas/tests/indexes/multi/test_contains.py
index c846af3e7cfbe..deff6aacf8f9c 100644
--- a/pandas/tests/indexes/multi/test_contains.py
+++ b/pandas/tests/indexes/multi/test_contains.py
@@ -3,10 +3,11 @@
import numpy as np
import pytest
+from pandas.compat import PYPY
+
import pandas as pd
-import pandas.util.testing as tm
from pandas import MultiIndex
-from pandas.compat import PYPY
+import pandas.util.testing as tm
def test_contains_top_level():
diff --git a/pandas/tests/indexes/multi/test_copy.py b/pandas/tests/indexes/multi/test_copy.py
index 99a5bcc1c217b..0d09e3ef2e4b1 100644
--- a/pandas/tests/indexes/multi/test_copy.py
+++ b/pandas/tests/indexes/multi/test_copy.py
@@ -4,8 +4,8 @@
import pytest
-import pandas.util.testing as tm
from pandas import MultiIndex
+import pandas.util.testing as tm
def assert_multiindex_copied(copy, original):
diff --git a/pandas/tests/indexes/multi/test_equivalence.py b/pandas/tests/indexes/multi/test_equivalence.py
index e3af327ededa2..41cb2409f0532 100644
--- a/pandas/tests/indexes/multi/test_equivalence.py
+++ b/pandas/tests/indexes/multi/test_equivalence.py
@@ -3,10 +3,11 @@
import numpy as np
+from pandas.compat import lrange, lzip, range
+
import pandas as pd
-import pandas.util.testing as tm
from pandas import Index, MultiIndex, Series
-from pandas.compat import lrange, lzip, range
+import pandas.util.testing as tm
def test_equals(idx):
diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py
index cac32ce7040b4..e72b76ed07269 100644
--- a/pandas/tests/indexes/multi/test_get_set.py
+++ b/pandas/tests/indexes/multi/test_get_set.py
@@ -4,10 +4,11 @@
import numpy as np
import pytest
+from pandas.compat import range
+
import pandas as pd
-import pandas.util.testing as tm
from pandas import CategoricalIndex, Index, MultiIndex
-from pandas.compat import range
+import pandas.util.testing as tm
def assert_matching(actual, expected, check_dtype=False):
diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py
index ec83ae4766ab0..2b5f16b0ea0c8 100644
--- a/pandas/tests/indexes/multi/test_indexing.py
+++ b/pandas/tests/indexes/multi/test_indexing.py
@@ -6,14 +6,14 @@
import numpy as np
import pytest
+from pandas.compat import lrange
+
import pandas as pd
-import pandas.util.testing as tm
from pandas import (
Categorical, CategoricalIndex, Index, IntervalIndex, MultiIndex,
- date_range
-)
-from pandas.compat import lrange
+ date_range)
from pandas.core.indexes.base import InvalidIndexError
+import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
diff --git a/pandas/tests/indexes/multi/test_sorting.py b/pandas/tests/indexes/multi/test_sorting.py
index b2f12405eb195..80e2b811ac062 100644
--- a/pandas/tests/indexes/multi/test_sorting.py
+++ b/pandas/tests/indexes/multi/test_sorting.py
@@ -2,12 +2,13 @@
import numpy as np
import pytest
-import pandas as pd
-import pandas.util.testing as tm
-from pandas import CategoricalIndex, DataFrame, Index, MultiIndex, RangeIndex
from pandas.compat import lrange
from pandas.errors import PerformanceWarning, UnsortedIndexError
+import pandas as pd
+from pandas import CategoricalIndex, DataFrame, Index, MultiIndex, RangeIndex
+import pandas.util.testing as tm
+
def test_sortlevel(idx):
import random
diff --git a/pandas/tests/indexes/period/test_arithmetic.py b/pandas/tests/indexes/period/test_arithmetic.py
index 5864bc0cefcbc..2c04f22f8b450 100644
--- a/pandas/tests/indexes/period/test_arithmetic.py
+++ b/pandas/tests/indexes/period/test_arithmetic.py
@@ -4,8 +4,8 @@
import pytest
import pandas as pd
-import pandas.util.testing as tm
from pandas import PeriodIndex
+import pandas.util.testing as tm
class TestPeriodIndexArithmetic(object):
diff --git a/pandas/tests/indexes/period/test_astype.py b/pandas/tests/indexes/period/test_astype.py
index f254cf2a9cae8..f7c2bf3d6bf4f 100644
--- a/pandas/tests/indexes/period/test_astype.py
+++ b/pandas/tests/indexes/period/test_astype.py
@@ -4,8 +4,8 @@
import pytest
import pandas as pd
-import pandas.util.testing as tm
from pandas import Index, Int64Index, NaT, Period, PeriodIndex, period_range
+import pandas.util.testing as tm
class TestPeriodIndexAsType(object):
diff --git a/pandas/tests/indexes/period/test_construction.py b/pandas/tests/indexes/period/test_construction.py
index cfc6c3b8b0a2e..9622f47697f8d 100644
--- a/pandas/tests/indexes/period/test_construction.py
+++ b/pandas/tests/indexes/period/test_construction.py
@@ -1,14 +1,15 @@
import numpy as np
import pytest
+from pandas.compat import PY3, lmap, lrange, text_type
+
+from pandas.core.dtypes.dtypes import PeriodDtype
+
import pandas as pd
+from pandas import (
+ Index, Period, PeriodIndex, Series, date_range, offsets, period_range)
import pandas.core.indexes.period as period
import pandas.util.testing as tm
-from pandas import (
- Index, Period, PeriodIndex, Series, date_range, offsets, period_range
-)
-from pandas.compat import PY3, lmap, lrange, text_type
-from pandas.core.dtypes.dtypes import PeriodDtype
class TestPeriodIndex(object):
diff --git a/pandas/tests/indexes/period/test_formats.py b/pandas/tests/indexes/period/test_formats.py
index 87211d091e4ea..d4035efa2b866 100644
--- a/pandas/tests/indexes/period/test_formats.py
+++ b/pandas/tests/indexes/period/test_formats.py
@@ -2,8 +2,8 @@
import pytest
import pandas as pd
-import pandas.util.testing as tm
from pandas import PeriodIndex
+import pandas.util.testing as tm
def test_to_native_types():
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index b720f56eeab93..fafba144bb148 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -3,13 +3,13 @@
import numpy as np
import pytest
-import pandas as pd
-from pandas import (
- DatetimeIndex, Period, PeriodIndex, Series, notna, period_range
-)
from pandas._libs import tslibs
from pandas._libs.tslibs import period as libperiod
from pandas.compat import lrange
+
+import pandas as pd
+from pandas import (
+ DatetimeIndex, Period, PeriodIndex, Series, notna, period_range)
from pandas.util import testing as tm
diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py
index 80550292ed4f8..516eb8971abaf 100644
--- a/pandas/tests/indexes/period/test_ops.py
+++ b/pandas/tests/indexes/period/test_ops.py
@@ -2,12 +2,13 @@
import numpy as np
import pytest
-import pandas as pd
import pandas._libs.tslib as tslib
-import pandas.util.testing as tm
+
+import pandas as pd
from pandas import DatetimeIndex, Index, Period, PeriodIndex, Series
from pandas.core.arrays import PeriodArray
from pandas.tests.test_base import Ops
+import pandas.util.testing as tm
class TestPeriodIndexOps(Ops):
diff --git a/pandas/tests/indexes/period/test_partial_slicing.py b/pandas/tests/indexes/period/test_partial_slicing.py
index eec761395d971..fcf1156266880 100644
--- a/pandas/tests/indexes/period/test_partial_slicing.py
+++ b/pandas/tests/indexes/period/test_partial_slicing.py
@@ -3,8 +3,7 @@
import pandas as pd
from pandas import (
- DataFrame, DatetimeIndex, Period, PeriodIndex, Series, period_range
-)
+ DataFrame, DatetimeIndex, Period, PeriodIndex, Series, period_range)
from pandas.util import testing as tm
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index 300d5ef609b3e..8b2e91450c8c0 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -1,12 +1,12 @@
import numpy as np
import pytest
-import pandas as pd
import pandas.util._test_decorators as td
+
+import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, Index, NaT, Period, PeriodIndex, Series,
- date_range, offsets, period_range
-)
+ date_range, offsets, period_range)
from pandas.util import testing as tm
from ..datetimelike import DatetimeLike
diff --git a/pandas/tests/indexes/period/test_period_range.py b/pandas/tests/indexes/period/test_period_range.py
index fd8d2f94c1799..11d38df1dd49c 100644
--- a/pandas/tests/indexes/period/test_period_range.py
+++ b/pandas/tests/indexes/period/test_period_range.py
@@ -1,7 +1,7 @@
import pytest
-import pandas.util.testing as tm
from pandas import NaT, Period, PeriodIndex, date_range, period_range
+import pandas.util.testing as tm
class TestPeriodRange(object):
diff --git a/pandas/tests/indexes/period/test_scalar_compat.py b/pandas/tests/indexes/period/test_scalar_compat.py
index 48c1d5b8dd706..01fb1e3dc7e02 100644
--- a/pandas/tests/indexes/period/test_scalar_compat.py
+++ b/pandas/tests/indexes/period/test_scalar_compat.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
"""Tests for PeriodIndex behaving like a vectorized Period scalar"""
-import pandas.util.testing as tm
from pandas import PeriodIndex, Timedelta, date_range
+import pandas.util.testing as tm
class TestPeriodIndexOps(object):
diff --git a/pandas/tests/indexes/period/test_setops.py b/pandas/tests/indexes/period/test_setops.py
index 479104fedcf74..c4dd23b1708db 100644
--- a/pandas/tests/indexes/period/test_setops.py
+++ b/pandas/tests/indexes/period/test_setops.py
@@ -2,9 +2,9 @@
import pytest
import pandas as pd
+from pandas import Index, PeriodIndex, date_range, period_range
import pandas.core.indexes.period as period
import pandas.util.testing as tm
-from pandas import Index, PeriodIndex, date_range, period_range
def _permute(obj):
diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py
index 2919b97da5825..8d09273bde63d 100644
--- a/pandas/tests/indexes/period/test_tools.py
+++ b/pandas/tests/indexes/period/test_tools.py
@@ -3,15 +3,15 @@
import numpy as np
import pytest
+from pandas._libs.tslibs.ccalendar import MONTHS
+from pandas.compat import lrange
+
import pandas as pd
-import pandas.core.indexes.period as period
-import pandas.util.testing as tm
from pandas import (
DatetimeIndex, Period, PeriodIndex, Series, Timedelta, Timestamp,
- date_range, period_range, to_datetime
-)
-from pandas._libs.tslibs.ccalendar import MONTHS
-from pandas.compat import lrange
+ date_range, period_range, to_datetime)
+import pandas.core.indexes.period as period
+import pandas.util.testing as tm
class TestPeriodRepresentation(object):
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index 6f151134bd24f..8373cbc89149a 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -5,12 +5,13 @@
import numpy as np
import pytest
-import pandas as pd
-import pandas.util.testing as tm
-from pandas import Float64Index, Index, Int64Index, Series, UInt64Index
from pandas._libs.tslibs import Timestamp
from pandas.compat import range
+
+import pandas as pd
+from pandas import Float64Index, Index, Int64Index, Series, UInt64Index
from pandas.tests.indexes.common import Base
+import pandas.util.testing as tm
class Numeric(Base):
diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py
index 4bd66c070e731..ecda48822eb0f 100644
--- a/pandas/tests/indexes/test_range.py
+++ b/pandas/tests/indexes/test_range.py
@@ -1,16 +1,17 @@
# -*- coding: utf-8 -*-
-import operator
from datetime import datetime
from itertools import combinations
+import operator
import numpy as np
import pytest
+from pandas.compat import PY3, range, u
+
import pandas as pd
-import pandas.util.testing as tm
from pandas import Float64Index, Index, Int64Index, RangeIndex, Series, isna
-from pandas.compat import PY3, range, u
+import pandas.util.testing as tm
from .test_numeric import Numeric
diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py
index 1fd9bf86ff6ee..5053c7f3d9875 100644
--- a/pandas/tests/indexing/common.py
+++ b/pandas/tests/indexing/common.py
@@ -6,15 +6,17 @@
import numpy as np
import pytest
-from pandas import (
- DataFrame, Float64Index, MultiIndex, Panel, Series, UInt64Index,
- date_range
-)
from pandas.compat import lrange
+
from pandas.core.dtypes.common import is_scalar
-from pandas.io.formats.printing import pprint_thing
+
+from pandas import (
+ DataFrame, Float64Index, MultiIndex, Panel, Series, UInt64Index,
+ date_range)
from pandas.util import testing as tm
+from pandas.io.formats.printing import pprint_thing
+
_verbose = False
diff --git a/pandas/tests/indexing/interval/test_interval.py b/pandas/tests/indexing/interval/test_interval.py
index 67fa31637fc25..938caec006f3a 100644
--- a/pandas/tests/indexing/interval/test_interval.py
+++ b/pandas/tests/indexing/interval/test_interval.py
@@ -2,8 +2,8 @@
import pytest
import pandas as pd
-import pandas.util.testing as tm
from pandas import DataFrame, Interval, IntervalIndex, Series
+import pandas.util.testing as tm
class TestIntervalIndex(object):
diff --git a/pandas/tests/indexing/interval/test_interval_new.py b/pandas/tests/indexing/interval/test_interval_new.py
index 080f2abdad7de..4b2ec0c4d17bf 100644
--- a/pandas/tests/indexing/interval/test_interval_new.py
+++ b/pandas/tests/indexing/interval/test_interval_new.py
@@ -1,8 +1,8 @@
import numpy as np
import pytest
-import pandas.util.testing as tm
from pandas import Interval, IntervalIndex, Series
+import pandas.util.testing as tm
pytestmark = pytest.mark.skip(reason="new indexing tests for issue 16316")
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index 85f95e79cc003..3a235e1eeb0dc 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -3,15 +3,16 @@
import numpy as np
import pytest
-import pandas as pd
import pandas.compat as compat
+
+from pandas.core.dtypes.common import is_categorical_dtype
+from pandas.core.dtypes.dtypes import CategoricalDtype
+
+import pandas as pd
from pandas import (
Categorical, CategoricalIndex, DataFrame, Index, Interval, Series,
- Timestamp
-)
+ Timestamp)
from pandas.api.types import CategoricalDtype as CDT
-from pandas.core.dtypes.common import is_categorical_dtype
-from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py
index 3762585558082..71fec75f9a7d3 100644
--- a/pandas/tests/indexing/test_chaining_and_caching.py
+++ b/pandas/tests/indexing/test_chaining_and_caching.py
@@ -4,8 +4,7 @@
import pandas as pd
from pandas import (
DataFrame, MultiIndex, Series, Timestamp, compat, date_range,
- option_context
-)
+ option_context)
from pandas.core import common as com
from pandas.util import testing as tm
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index 643b6081d176a..65110d4955294 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -5,8 +5,9 @@
import numpy as np
import pytest
-import pandas as pd
import pandas.compat as compat
+
+import pandas as pd
import pandas.util.testing as tm
###############################################################
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index 5fd1724f6e95c..11fb90ebd9bb9 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -1,7 +1,7 @@
from datetime import datetime, timedelta
-import numpy as np
from dateutil import tz
+import numpy as np
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, date_range
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index c7fe9959b048a..0a55b3f67dd3f 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -5,10 +5,9 @@
import numpy as np
import pytest
-import pandas.util.testing as tm
from pandas import (
- DataFrame, Float64Index, Index, Int64Index, RangeIndex, Series
-)
+ DataFrame, Float64Index, Index, Int64Index, RangeIndex, Series)
+import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_series_equal
ignore_ix = pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index e2b6a37096493..6d0b516d8ebf0 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -5,10 +5,11 @@
import numpy as np
import pytest
+from pandas.compat import lmap, lrange
+
import pandas as pd
from pandas import DataFrame, Series, concat, date_range, isna
from pandas.api.types import is_scalar
-from pandas.compat import lmap, lrange
from pandas.tests.indexing.common import Base
from pandas.util import testing as tm
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index e96e249743468..3b95ba8e4b9d8 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -3,22 +3,23 @@
""" test fancy indexing & misc """
-import weakref
from datetime import datetime
from warnings import catch_warnings, simplefilter
+import weakref
import numpy as np
import pytest
-import pandas as pd
-import pandas.util.testing as tm
-from pandas import DataFrame, Index, MultiIndex, NaT, Series
from pandas.compat import PY2, StringIO, lrange, lzip, range
+
from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype
+
+import pandas as pd
+from pandas import DataFrame, Index, MultiIndex, NaT, Series
from pandas.core.indexing import (
- _maybe_numeric_slice, _non_reducing_slice, validate_indices
-)
+ _maybe_numeric_slice, _non_reducing_slice, validate_indices)
from pandas.tests.indexing.common import Base, _mklbl
+import pandas.util.testing as tm
# ------------------------------------------------------------------------
# Indexing test cases
diff --git a/pandas/tests/indexing/test_indexing_engines.py b/pandas/tests/indexing/test_indexing_engines.py
index 410eba99948ce..dcdfbcb7fbea2 100644
--- a/pandas/tests/indexing/test_indexing_engines.py
+++ b/pandas/tests/indexing/test_indexing_engines.py
@@ -1,9 +1,10 @@
import numpy as np
-import pandas.util.testing as tm
-from pandas import compat
from pandas._libs import algos as libalgos, index as libindex
+from pandas import compat
+import pandas.util.testing as tm
+
class TestNumericEngine(object):
def test_is_monotonic(self, numeric_indexing_engine_type_and_dtype):
diff --git a/pandas/tests/indexing/test_indexing_slow.py b/pandas/tests/indexing/test_indexing_slow.py
index 9715fc2bb5946..5fd1431ac210c 100644
--- a/pandas/tests/indexing/test_indexing_slow.py
+++ b/pandas/tests/indexing/test_indexing_slow.py
@@ -6,8 +6,8 @@
import pytest
import pandas as pd
-import pandas.util.testing as tm
from pandas.core.api import DataFrame, MultiIndex, Series
+import pandas.util.testing as tm
class TestIndexingSlow(object):
diff --git a/pandas/tests/indexing/test_ix.py b/pandas/tests/indexing/test_ix.py
index 0e99be31f6b93..850f80241a477 100644
--- a/pandas/tests/indexing/test_ix.py
+++ b/pandas/tests/indexing/test_ix.py
@@ -5,11 +5,13 @@
import numpy as np
import pytest
-import pandas as pd
-from pandas import DataFrame, MultiIndex, Series, option_context
from pandas.compat import lrange
-from pandas.core.dtypes.common import is_scalar
from pandas.errors import PerformanceWarning
+
+from pandas.core.dtypes.common import is_scalar
+
+import pandas as pd
+from pandas import DataFrame, MultiIndex, Series, option_context
from pandas.util import testing as tm
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 9314c2bcc8415..df0180c7a5bf7 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -6,10 +6,11 @@
import numpy as np
import pytest
+from pandas.compat import PY2, StringIO, lrange
+
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range
from pandas.api.types import is_scalar
-from pandas.compat import PY2, StringIO, lrange
from pandas.tests.indexing.common import Base
from pandas.util import testing as tm
diff --git a/pandas/tests/indexing/test_multiindex.py b/pandas/tests/indexing/test_multiindex.py
index 42d7b35a0a2fd..dcf148f199d52 100644
--- a/pandas/tests/indexing/test_multiindex.py
+++ b/pandas/tests/indexing/test_multiindex.py
@@ -3,11 +3,11 @@
import numpy as np
import pytest
+from pandas.errors import PerformanceWarning, UnsortedIndexError
+
import pandas as pd
from pandas import (
- DataFrame, Index, MultiIndex, Panel, Series, Timestamp, date_range
-)
-from pandas.errors import PerformanceWarning, UnsortedIndexError
+ DataFrame, Index, MultiIndex, Panel, Series, Timestamp, date_range)
from pandas.tests.indexing.common import _mklbl
from pandas.util import testing as tm
diff --git a/pandas/tests/io/parser/c_parser_only.py b/pandas/tests/io/parser/c_parser_only.py
index 7769b62dc0206..edcfe1c0768cd 100644
--- a/pandas/tests/io/parser/c_parser_only.py
+++ b/pandas/tests/io/parser/c_parser_only.py
@@ -14,11 +14,12 @@
import numpy as np
import pytest
-import pandas as pd
+from pandas.compat import StringIO, lrange, range
import pandas.util._test_decorators as td
-import pandas.util.testing as tm
+
+import pandas as pd
from pandas import DataFrame
-from pandas.compat import StringIO, lrange, range
+import pandas.util.testing as tm
class CParserTests(object):
diff --git a/pandas/tests/io/parser/comment.py b/pandas/tests/io/parser/comment.py
index c71f92d8fb1be..fc2310ca1daaf 100644
--- a/pandas/tests/io/parser/comment.py
+++ b/pandas/tests/io/parser/comment.py
@@ -7,10 +7,11 @@
import numpy as np
-import pandas.util.testing as tm
-from pandas import DataFrame
from pandas.compat import StringIO
+from pandas import DataFrame
+import pandas.util.testing as tm
+
class CommentTests(object):
diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py
index 82557815ed5cf..da8118ef3e123 100644
--- a/pandas/tests/io/parser/common.py
+++ b/pandas/tests/io/parser/common.py
@@ -1,24 +1,26 @@
# -*- coding: utf-8 -*-
import codecs
+from collections import OrderedDict
import csv
+from datetime import datetime
+from io import TextIOWrapper
import os
import platform
import re
import sys
-from collections import OrderedDict
-from datetime import datetime
-from io import TextIOWrapper
import numpy as np
import pytest
-import pandas as pd
-import pandas.util.testing as tm
-from pandas import DataFrame, Index, MultiIndex, Series, compat
from pandas._libs.tslib import Timestamp
from pandas.compat import PY3, BytesIO, StringIO, lrange, range, u
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
+
+import pandas as pd
+from pandas import DataFrame, Index, MultiIndex, Series, compat
+import pandas.util.testing as tm
+
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
diff --git a/pandas/tests/io/parser/compression.py b/pandas/tests/io/parser/compression.py
index 5efed89663e0f..2d32e383c7fee 100644
--- a/pandas/tests/io/parser/compression.py
+++ b/pandas/tests/io/parser/compression.py
@@ -10,9 +10,10 @@
import pytest
-import pandas as pd
import pandas.compat as compat
import pandas.util._test_decorators as td
+
+import pandas as pd
import pandas.util.testing as tm
try:
diff --git a/pandas/tests/io/parser/converters.py b/pandas/tests/io/parser/converters.py
index be14e7be0ab9b..f17ad019469ab 100644
--- a/pandas/tests/io/parser/converters.py
+++ b/pandas/tests/io/parser/converters.py
@@ -10,12 +10,13 @@
import numpy as np
import pytest
-import pandas as pd
-import pandas.util.testing as tm
-from pandas import DataFrame, Index
from pandas._libs.tslib import Timestamp
from pandas.compat import StringIO, lmap, parse_date
+import pandas as pd
+from pandas import DataFrame, Index
+import pandas.util.testing as tm
+
class ConverterTests(object):
diff --git a/pandas/tests/io/parser/dialect.py b/pandas/tests/io/parser/dialect.py
index 2a519a0083f40..480ce9ef361d0 100644
--- a/pandas/tests/io/parser/dialect.py
+++ b/pandas/tests/io/parser/dialect.py
@@ -7,11 +7,12 @@
import csv
-import pandas.util.testing as tm
-from pandas import DataFrame
from pandas.compat import StringIO
from pandas.errors import ParserWarning
+from pandas import DataFrame
+import pandas.util.testing as tm
+
class DialectTests(object):
diff --git a/pandas/tests/io/parser/dtypes.py b/pandas/tests/io/parser/dtypes.py
index 00dfb616c18ca..950795b33d460 100644
--- a/pandas/tests/io/parser/dtypes.py
+++ b/pandas/tests/io/parser/dtypes.py
@@ -8,13 +8,15 @@
import numpy as np
import pytest
-import pandas as pd
-import pandas.util.testing as tm
-from pandas import Categorical, DataFrame, Index, MultiIndex, Series
from pandas.compat import StringIO
-from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.errors import ParserWarning
+from pandas.core.dtypes.dtypes import CategoricalDtype
+
+import pandas as pd
+from pandas import Categorical, DataFrame, Index, MultiIndex, Series
+import pandas.util.testing as tm
+
class DtypeTests(object):
diff --git a/pandas/tests/io/parser/header.py b/pandas/tests/io/parser/header.py
index ced59fe1696d6..3a6db0fafa7c6 100644
--- a/pandas/tests/io/parser/header.py
+++ b/pandas/tests/io/parser/header.py
@@ -10,10 +10,11 @@
import numpy as np
import pytest
-import pandas.util.testing as tm
-from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO, lrange, u
+from pandas import DataFrame, Index, MultiIndex
+import pandas.util.testing as tm
+
class HeaderTests(object):
diff --git a/pandas/tests/io/parser/index_col.py b/pandas/tests/io/parser/index_col.py
index 973fa4c57545a..2909ef6214e62 100644
--- a/pandas/tests/io/parser/index_col.py
+++ b/pandas/tests/io/parser/index_col.py
@@ -8,10 +8,11 @@
import pytest
-import pandas.util.testing as tm
-from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO
+from pandas import DataFrame, Index, MultiIndex
+import pandas.util.testing as tm
+
class IndexColTests(object):
diff --git a/pandas/tests/io/parser/mangle_dupes.py b/pandas/tests/io/parser/mangle_dupes.py
index d043f64460028..1ebfa9cb0f645 100644
--- a/pandas/tests/io/parser/mangle_dupes.py
+++ b/pandas/tests/io/parser/mangle_dupes.py
@@ -6,10 +6,11 @@
de-duplicated (if mangling requested) or ignored otherwise.
"""
-import pandas.util.testing as tm
-from pandas import DataFrame
from pandas.compat import StringIO
+from pandas import DataFrame
+import pandas.util.testing as tm
+
class DupeColumnTests(object):
def test_basic(self):
diff --git a/pandas/tests/io/parser/multithread.py b/pandas/tests/io/parser/multithread.py
index dee7067ea8728..0be3a429f5f64 100644
--- a/pandas/tests/io/parser/multithread.py
+++ b/pandas/tests/io/parser/multithread.py
@@ -11,10 +11,11 @@
import numpy as np
+from pandas.compat import BytesIO, range
+
import pandas as pd
-import pandas.util.testing as tm
from pandas import DataFrame
-from pandas.compat import BytesIO, range
+import pandas.util.testing as tm
def _construct_dataframe(num_rows):
diff --git a/pandas/tests/io/parser/na_values.py b/pandas/tests/io/parser/na_values.py
index 392f4de2ecaaf..5e67b62879acb 100644
--- a/pandas/tests/io/parser/na_values.py
+++ b/pandas/tests/io/parser/na_values.py
@@ -6,14 +6,16 @@
"""
import numpy as np
-import pytest
from numpy import nan
+import pytest
-import pandas.io.common as com
-import pandas.util.testing as tm
-from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO, range
+from pandas import DataFrame, Index, MultiIndex
+import pandas.util.testing as tm
+
+import pandas.io.common as com
+
class NAvaluesTests(object):
diff --git a/pandas/tests/io/parser/parse_dates.py b/pandas/tests/io/parser/parse_dates.py
index d893b91226976..4c2c5b754f9bb 100644
--- a/pandas/tests/io/parser/parse_dates.py
+++ b/pandas/tests/io/parser/parse_dates.py
@@ -12,16 +12,19 @@
import pytest
import pytz
-import pandas as pd
-import pandas.io.date_converters as conv
-import pandas.io.parsers as parsers
-import pandas.util.testing as tm
-from pandas import DataFrame, DatetimeIndex, Index, MultiIndex, Series, compat
from pandas._libs.tslib import Timestamp
from pandas._libs.tslibs import parsing
+import pandas.compat as compat
from pandas.compat import StringIO, lrange, parse_date
from pandas.compat.numpy import np_array_datetime64_compat
+
+import pandas as pd
+from pandas import DataFrame, DatetimeIndex, Index, MultiIndex, Series
from pandas.core.indexes.datetimes import date_range
+import pandas.util.testing as tm
+
+import pandas.io.date_converters as conv
+import pandas.io.parsers as parsers
class ParseDatesTests(object):
diff --git a/pandas/tests/io/parser/python_parser_only.py b/pandas/tests/io/parser/python_parser_only.py
index 5d2f6b7231a5d..c3c87bca24a47 100644
--- a/pandas/tests/io/parser/python_parser_only.py
+++ b/pandas/tests/io/parser/python_parser_only.py
@@ -11,11 +11,13 @@
import pytest
-import pandas.util.testing as tm
-from pandas import DataFrame, Index, compat
+import pandas.compat as compat
from pandas.compat import BytesIO, StringIO, u
from pandas.errors import ParserError
+from pandas import DataFrame, Index
+import pandas.util.testing as tm
+
class PythonParserTests(object):
diff --git a/pandas/tests/io/parser/quoting.py b/pandas/tests/io/parser/quoting.py
index 27ee824cd2556..270a5430e6da4 100644
--- a/pandas/tests/io/parser/quoting.py
+++ b/pandas/tests/io/parser/quoting.py
@@ -7,11 +7,12 @@
import csv
-import pandas.util.testing as tm
-from pandas import DataFrame
from pandas.compat import PY3, StringIO, u
from pandas.errors import ParserError
+from pandas import DataFrame
+import pandas.util.testing as tm
+
class QuotingTests(object):
diff --git a/pandas/tests/io/parser/skiprows.py b/pandas/tests/io/parser/skiprows.py
index 89a7b5fd0ea1f..5d1b3b207a240 100644
--- a/pandas/tests/io/parser/skiprows.py
+++ b/pandas/tests/io/parser/skiprows.py
@@ -9,11 +9,12 @@
import numpy as np
-import pandas.util.testing as tm
-from pandas import DataFrame
from pandas.compat import StringIO, lrange, range
from pandas.errors import EmptyDataError
+from pandas import DataFrame
+import pandas.util.testing as tm
+
class SkipRowsTests(object):
diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py
index 8b8bf3a339d85..e54da94089cfd 100644
--- a/pandas/tests/io/parser/test_network.py
+++ b/pandas/tests/io/parser/test_network.py
@@ -9,10 +9,12 @@
import numpy as np
import pytest
+from pandas.compat import BytesIO, StringIO
import pandas.util._test_decorators as td
-import pandas.util.testing as tm
+
from pandas import DataFrame
-from pandas.compat import BytesIO, StringIO
+import pandas.util.testing as tm
+
from pandas.io.parsers import read_csv
diff --git a/pandas/tests/io/parser/test_parsers.py b/pandas/tests/io/parser/test_parsers.py
index 00879c096aa50..50d927176a7b4 100644
--- a/pandas/tests/io/parser/test_parsers.py
+++ b/pandas/tests/io/parser/test_parsers.py
@@ -4,12 +4,13 @@
import pytest
-import pandas.core.common as com
-import pandas.util.testing as tm
-from pandas import DataFrame, read_csv, read_table
from pandas._libs.tslib import Timestamp
from pandas.compat import StringIO
+from pandas import DataFrame, read_csv, read_table
+import pandas.core.common as com
+import pandas.util.testing as tm
+
from .c_parser_only import CParserTests
from .comment import CommentTests
from .common import ParserTests
diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index 468640ae327bb..f7846f7824ba5 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -11,10 +11,13 @@
import numpy as np
import pytest
+import pandas.compat as compat
+from pandas.compat import BytesIO, StringIO
+
import pandas as pd
+from pandas import DataFrame
import pandas.util.testing as tm
-from pandas import DataFrame, compat
-from pandas.compat import BytesIO, StringIO
+
from pandas.io.parsers import EmptyDataError, read_csv, read_fwf
diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py
index 94c69bbbb8d5f..93c115ae0a57b 100644
--- a/pandas/tests/io/parser/test_textreader.py
+++ b/pandas/tests/io/parser/test_textreader.py
@@ -9,17 +9,20 @@
import sys
import numpy as np
-import pytest
from numpy import nan
+import pytest
import pandas._libs.parsers as parser
-import pandas.util.testing as tm
-from pandas import DataFrame, compat
from pandas._libs.parsers import TextReader
+import pandas.compat as compat
from pandas.compat import BytesIO, StringIO, map
-from pandas.io.parsers import TextFileReader, read_csv
+
+from pandas import DataFrame
+import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
+from pandas.io.parsers import TextFileReader, read_csv
+
class TestTextReader(object):
diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py
index c6fd250116779..4437b0db9054e 100644
--- a/pandas/tests/io/parser/test_unsupported.py
+++ b/pandas/tests/io/parser/test_unsupported.py
@@ -11,10 +11,12 @@
import pytest
-import pandas.io.parsers as parsers
-import pandas.util.testing as tm
from pandas.compat import StringIO
from pandas.errors import ParserError
+
+import pandas.util.testing as tm
+
+import pandas.io.parsers as parsers
from pandas.io.parsers import read_csv
diff --git a/pandas/tests/io/parser/usecols.py b/pandas/tests/io/parser/usecols.py
index 5fd2dc3f48e3e..82d45b163d16a 100644
--- a/pandas/tests/io/parser/usecols.py
+++ b/pandas/tests/io/parser/usecols.py
@@ -8,11 +8,12 @@
import numpy as np
import pytest
-import pandas.util.testing as tm
-from pandas import DataFrame, Index
from pandas._libs.tslib import Timestamp
from pandas.compat import StringIO
+from pandas import DataFrame, Index
+import pandas.util.testing as tm
+
class UsecolsTests(object):
msg_validate_usecols_arg = ("'usecols' must either be list-like of all "
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index 7b9e23fca59aa..c6b6f6cab9ddd 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -15,7 +15,7 @@
import pandas as pd
import pandas.util.testing as tm
-from pandas import compat
+import pandas.compat as compat
from pandas.compat import iterkeys
from pandas.core.dtypes.common import is_categorical_dtype
from pandas.core.frame import DataFrame, Series
diff --git a/pandas/tests/scalar/interval/test_ops.py b/pandas/tests/scalar/interval/test_ops.py
index cfd9fc34faeff..7eca24aa8af25 100644
--- a/pandas/tests/scalar/interval/test_ops.py
+++ b/pandas/tests/scalar/interval/test_ops.py
@@ -1,8 +1,8 @@
"""Tests for Interval-Interval operations, such as overlaps, contains, etc."""
import pytest
-import pandas.util.testing as tm
from pandas import Interval, Timedelta, Timestamp
+import pandas.util.testing as tm
@pytest.fixture(params=[
diff --git a/pandas/tests/series/common.py b/pandas/tests/series/common.py
index ec7558e41ab40..cacca38b2d608 100644
--- a/pandas/tests/series/common.py
+++ b/pandas/tests/series/common.py
@@ -1,6 +1,7 @@
+from pandas.util._decorators import cache_readonly
+
import pandas as pd
import pandas.util.testing as tm
-from pandas.util._decorators import cache_readonly
_ts = tm.makeTimeSeries()
diff --git a/pandas/tests/series/conftest.py b/pandas/tests/series/conftest.py
index 352e4df54fe5b..431aacb1c8d56 100644
--- a/pandas/tests/series/conftest.py
+++ b/pandas/tests/series/conftest.py
@@ -1,7 +1,7 @@
import pytest
-import pandas.util.testing as tm
from pandas import Series
+import pandas.util.testing as tm
@pytest.fixture
diff --git a/pandas/tests/series/indexing/test_alter_index.py b/pandas/tests/series/indexing/test_alter_index.py
index ea6788a1a36ee..25c930e8cade6 100644
--- a/pandas/tests/series/indexing/test_alter_index.py
+++ b/pandas/tests/series/indexing/test_alter_index.py
@@ -4,13 +4,15 @@
from datetime import datetime
import numpy as np
-import pytest
from numpy import nan
+import pytest
+
+import pandas.compat as compat
+from pandas.compat import lrange, range
import pandas as pd
+from pandas import Categorical, Series, date_range, isna
import pandas.util.testing as tm
-from pandas import Categorical, Series, compat, date_range, isna
-from pandas.compat import lrange, range
from pandas.util.testing import assert_series_equal
diff --git a/pandas/tests/series/indexing/test_boolean.py b/pandas/tests/series/indexing/test_boolean.py
index df92851f43227..5d1b81ba7dc1c 100644
--- a/pandas/tests/series/indexing/test_boolean.py
+++ b/pandas/tests/series/indexing/test_boolean.py
@@ -4,15 +4,18 @@
import numpy as np
import pytest
-import pandas as pd
-import pandas.util.testing as tm
-from pandas import Index, Series, Timestamp, date_range, isna
from pandas.compat import lrange, range
+
from pandas.core.dtypes.common import is_integer
+
+import pandas as pd
+from pandas import Index, Series, Timestamp, date_range, isna
from pandas.core.indexing import IndexingError
-from pandas.tseries.offsets import BDay
+import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
+from pandas.tseries.offsets import BDay
+
def test_getitem_boolean(test_data):
s = test_data.series
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index d63150165d7d3..a0da25c96caa6 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -6,15 +6,15 @@
import numpy as np
import pytest
-import pandas as pd
-import pandas._libs.index as _index
-import pandas.util.testing as tm
-from pandas import DataFrame, DatetimeIndex, NaT, Series, Timestamp, date_range
from pandas._libs import tslib
+import pandas._libs.index as _index
from pandas.compat import lrange, range
+
+import pandas as pd
+from pandas import DataFrame, DatetimeIndex, NaT, Series, Timestamp, date_range
+import pandas.util.testing as tm
from pandas.util.testing import (
- assert_almost_equal, assert_frame_equal, assert_series_equal
-)
+ assert_almost_equal, assert_frame_equal, assert_series_equal)
"""
diff --git a/pandas/tests/series/indexing/test_iloc.py b/pandas/tests/series/indexing/test_iloc.py
index 64ef2078cbe78..fa85da6a70d62 100644
--- a/pandas/tests/series/indexing/test_iloc.py
+++ b/pandas/tests/series/indexing/test_iloc.py
@@ -3,8 +3,9 @@
import numpy as np
-from pandas import Series
from pandas.compat import lrange, range
+
+from pandas import Series
from pandas.util.testing import assert_almost_equal, assert_series_equal
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index e926e477d0bc4..1582bd119c806 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -8,16 +8,18 @@
import numpy as np
import pytest
-import pandas as pd
-import pandas.util.testing as tm
-from pandas import (
- Categorical, DataFrame, MultiIndex, Series, Timedelta, Timestamp
-)
from pandas.compat import lrange, range
+
from pandas.core.dtypes.common import is_scalar
-from pandas.tseries.offsets import BDay
+
+import pandas as pd
+from pandas import (
+ Categorical, DataFrame, MultiIndex, Series, Timedelta, Timestamp)
+import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
+from pandas.tseries.offsets import BDay
+
def test_basic_indexing():
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
diff --git a/pandas/tests/series/indexing/test_loc.py b/pandas/tests/series/indexing/test_loc.py
index 596b5d022c68d..36c26267ecd5f 100644
--- a/pandas/tests/series/indexing/test_loc.py
+++ b/pandas/tests/series/indexing/test_loc.py
@@ -4,9 +4,10 @@
import numpy as np
import pytest
+from pandas.compat import lrange
+
import pandas as pd
from pandas import Series, Timestamp
-from pandas.compat import lrange
from pandas.util.testing import assert_series_equal
diff --git a/pandas/tests/series/indexing/test_numeric.py b/pandas/tests/series/indexing/test_numeric.py
index 0b61274801de1..da0e15b8a96fc 100644
--- a/pandas/tests/series/indexing/test_numeric.py
+++ b/pandas/tests/series/indexing/test_numeric.py
@@ -4,10 +4,11 @@
import numpy as np
import pytest
+from pandas.compat import lrange, range
+
import pandas as pd
-import pandas.util.testing as tm
from pandas import DataFrame, Index, Series
-from pandas.compat import lrange, range
+import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py
index 5193307a3c256..be3d0cd6d929c 100644
--- a/pandas/tests/series/test_alter_axes.py
+++ b/pandas/tests/series/test_alter_axes.py
@@ -6,10 +6,11 @@
import numpy as np
import pytest
-import pandas.util.testing as tm
-from pandas import DataFrame, Index, MultiIndex, RangeIndex, Series
from pandas.compat import lrange, range, zip
+from pandas import DataFrame, Index, MultiIndex, RangeIndex, Series
+import pandas.util.testing as tm
+
class TestSeriesAlterAxes(object):
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index c84cf2ee50e4b..3f14c80e77dd0 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -1,30 +1,29 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-import operator
from distutils.version import LooseVersion
from itertools import product
+import operator
import numpy as np
-import pytest
from numpy import nan
+import pytest
-import pandas as pd
-import pandas.core.nanops as nanops
+from pandas.compat import PY35, lrange, range
import pandas.util._test_decorators as td
-import pandas.util.testing as tm
+
+import pandas as pd
from pandas import (
Categorical, CategoricalIndex, DataFrame, Series, bdate_range, compat,
- date_range, isna, notna
-)
-from pandas.compat import PY35, lrange, range
+ date_range, isna, notna)
from pandas.core.index import MultiIndex
from pandas.core.indexes.datetimes import Timestamp
from pandas.core.indexes.timedeltas import Timedelta
+import pandas.core.nanops as nanops
+import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_index_equal,
- assert_series_equal
-)
+ assert_series_equal)
from .common import TestData
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 32181fe3dad39..bd0d02014dcdb 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -1,24 +1,26 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
+from collections import OrderedDict
import pydoc
import warnings
-from collections import OrderedDict
import numpy as np
import pytest
+import pandas.compat as compat
+from pandas.compat import isidentifier, lzip, range, string_types
+
import pandas as pd
-import pandas.io.formats.printing as printing
-import pandas.util.testing as tm
from pandas import (
Categorical, DataFrame, DatetimeIndex, Index, Series, TimedeltaIndex,
- compat, date_range, period_range, timedelta_range
-)
-from pandas.compat import isidentifier, lzip, range, string_types
+ date_range, period_range, timedelta_range)
from pandas.core.arrays import PeriodArray
from pandas.core.indexes.datetimes import Timestamp
+import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal, ensure_clean
+import pandas.io.formats.printing as printing
+
from .common import TestData
diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py
index 62a677b82aeb4..f4c8ebe64630c 100644
--- a/pandas/tests/series/test_apply.py
+++ b/pandas/tests/series/test_apply.py
@@ -7,11 +7,13 @@
import numpy as np
import pytest
-import pandas as pd
-import pandas.util.testing as tm
-from pandas import DataFrame, Index, Series, compat, isna
+import pandas.compat as compat
from pandas.compat import lrange
+
+import pandas as pd
+from pandas import DataFrame, Index, Series, isna
from pandas.conftest import _get_cython_table_params
+import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index e781488a799ec..979775633f644 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -1,14 +1,14 @@
# -*- coding: utf-8 -*-
-import operator
from datetime import timedelta
+import operator
import numpy as np
import pytest
import pandas as pd
-import pandas.util.testing as tm
from pandas import Series, compat
from pandas.core.indexes.period import IncompatibleFrequency
+import pandas.util.testing as tm
def _permute(obj):
diff --git a/pandas/tests/series/test_asof.py b/pandas/tests/series/test_asof.py
index 2d18b61750996..488fc894b953e 100644
--- a/pandas/tests/series/test_asof.py
+++ b/pandas/tests/series/test_asof.py
@@ -3,8 +3,8 @@
import numpy as np
import pytest
-import pandas.util.testing as tm
from pandas import Series, Timestamp, date_range, isna, notna, offsets
+import pandas.util.testing as tm
class TestSeriesAsof():
diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
index 539e3c560b662..a685eb7e9fbd3 100644
--- a/pandas/tests/series/test_combine_concat.py
+++ b/pandas/tests/series/test_combine_concat.py
@@ -4,12 +4,12 @@
from datetime import datetime
import numpy as np
-import pytest
from numpy import nan
+import pytest
import pandas as pd
-import pandas.util.testing as tm
from pandas import DataFrame, DatetimeIndex, Series, compat, date_range
+import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 7595b1278a291..494321c5190a6 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -5,24 +5,24 @@
from datetime import datetime, timedelta
import numpy as np
+from numpy import nan
import numpy.ma as ma
import pytest
-from numpy import nan
+
+from pandas._libs import lib
+from pandas._libs.tslib import iNaT
+from pandas.compat import PY36, long, lrange, range, zip
+
+from pandas.core.dtypes.common import (
+ is_categorical_dtype, is_datetime64tz_dtype)
import pandas as pd
-import pandas.util.testing as tm
from pandas import (
Categorical, DataFrame, Index, IntervalIndex, MultiIndex, NaT, Series,
- Timestamp, date_range, isna, period_range, timedelta_range
-)
-from pandas._libs import lib
-from pandas._libs.tslib import iNaT
+ Timestamp, date_range, isna, period_range, timedelta_range)
from pandas.api.types import CategoricalDtype
-from pandas.compat import PY36, long, lrange, range, zip
from pandas.core.arrays import period_array
-from pandas.core.dtypes.common import (
- is_categorical_dtype, is_datetime64tz_dtype
-)
+import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py
index 618745eb25a5a..1fd95c4205b0e 100644
--- a/pandas/tests/series/test_datetime_values.py
+++ b/pandas/tests/series/test_datetime_values.py
@@ -2,24 +2,25 @@
# pylint: disable-msg=E1101,W0612
import calendar
+from datetime import date, datetime, time
import locale
import unicodedata
-from datetime import date, datetime, time
import numpy as np
import pytest
import pytz
+from pandas._libs.tslibs.timezones import maybe_get_tz
+
+from pandas.core.dtypes.common import is_integer_dtype, is_list_like
+
import pandas as pd
-import pandas.core.common as com
-import pandas.util.testing as tm
from pandas import (
DataFrame, DatetimeIndex, Index, PeriodIndex, Series, TimedeltaIndex,
- bdate_range, compat, date_range, period_range, timedelta_range
-)
-from pandas._libs.tslibs.timezones import maybe_get_tz
+ bdate_range, compat, date_range, period_range, timedelta_range)
from pandas.core.arrays import PeriodArray
-from pandas.core.dtypes.common import is_integer_dtype, is_list_like
+import pandas.core.common as com
+import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index 64184a6465ba3..a3aaabb70ae8c 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -1,23 +1,23 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
+from datetime import datetime, timedelta
import string
import sys
-from datetime import datetime, timedelta
import numpy as np
-import pytest
from numpy import nan
+import pytest
-import pandas as pd
import pandas._libs.tslib as tslib
-import pandas.util.testing as tm
+import pandas.compat as compat
+from pandas.compat import lrange, range, u
+
+import pandas as pd
from pandas import (
- Categorical, DataFrame, Index, Series, Timedelta, Timestamp, compat,
- date_range
-)
+ Categorical, DataFrame, Index, Series, Timedelta, Timestamp, date_range)
from pandas.api.types import CategoricalDtype
-from pandas.compat import lrange, range, u
+import pandas.util.testing as tm
class TestSeriesDtypes(object):
diff --git a/pandas/tests/series/test_internals.py b/pandas/tests/series/test_internals.py
index f6759f039ed98..8e3b0d19447a1 100644
--- a/pandas/tests/series/test_internals.py
+++ b/pandas/tests/series/test_internals.py
@@ -4,14 +4,15 @@
from datetime import datetime
import numpy as np
-import pytest
from numpy import nan
+import pytest
-import pandas as pd
import pandas._libs.lib as lib
-import pandas.util.testing as tm
+
+import pandas as pd
from pandas import Series
from pandas.core.indexes.datetimes import Timestamp
+import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py
index 033816dd8ee4b..5749b0c6551d6 100644
--- a/pandas/tests/series/test_io.py
+++ b/pandas/tests/series/test_io.py
@@ -7,14 +7,15 @@
import numpy as np
import pytest
+from pandas.compat import StringIO, u
+
import pandas as pd
-import pandas.util.testing as tm
from pandas import DataFrame, Series
-from pandas.compat import StringIO, u
-from pandas.io.common import _get_handle
+import pandas.util.testing as tm
from pandas.util.testing import (
- assert_almost_equal, assert_frame_equal, assert_series_equal, ensure_clean
-)
+ assert_almost_equal, assert_frame_equal, assert_series_equal, ensure_clean)
+
+from pandas.io.common import _get_handle
class TestSeriesToCSV():
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index e89bcae052115..c38b7c0083a21 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -5,21 +5,21 @@
from distutils.version import LooseVersion
import numpy as np
+from numpy import nan
import pytest
import pytz
-from numpy import nan
-import pandas as pd
+from pandas._libs.tslib import iNaT
+from pandas.compat import range
+from pandas.errors import PerformanceWarning
import pandas.util._test_decorators as td
-import pandas.util.testing as tm
+
+import pandas as pd
from pandas import (
Categorical, DataFrame, Index, IntervalIndex, MultiIndex, NaT, Series,
- Timestamp, date_range, isna
-)
-from pandas._libs.tslib import iNaT
-from pandas.compat import range
+ Timestamp, date_range, isna)
from pandas.core.series import remove_na
-from pandas.errors import PerformanceWarning
+import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
try:
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index 082ed5e0f5123..35bd99ff2eda8 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -1,25 +1,24 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-import operator
from datetime import datetime, timedelta
+import operator
import numpy as np
-import pytest
from numpy import nan
+import pytest
+
+import pandas.compat as compat
+from pandas.compat import range
import pandas as pd
-import pandas.core.nanops as nanops
-import pandas.util.testing as tm
from pandas import (
- Categorical, DataFrame, Index, NaT, Series, bdate_range, compat,
- date_range, isna
-)
-from pandas.compat import range
+ Categorical, DataFrame, Index, NaT, Series, bdate_range, date_range, isna)
from pandas.core import ops
+import pandas.core.nanops as nanops
+import pandas.util.testing as tm
from pandas.util.testing import (
- assert_almost_equal, assert_frame_equal, assert_series_equal
-)
+ assert_almost_equal, assert_frame_equal, assert_series_equal)
from .common import TestData
diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py
index 88a5ff261fbb4..ce620db8d9c1b 100644
--- a/pandas/tests/series/test_period.py
+++ b/pandas/tests/series/test_period.py
@@ -2,9 +2,9 @@
import pytest
import pandas as pd
-import pandas.util.testing as tm
from pandas import DataFrame, Period, Series, period_range
from pandas.core.arrays import PeriodArray
+import pandas.util.testing as tm
class TestSeriesPeriod(object):
diff --git a/pandas/tests/series/test_quantile.py b/pandas/tests/series/test_quantile.py
index 1a4c72b9f35fc..027814c618303 100644
--- a/pandas/tests/series/test_quantile.py
+++ b/pandas/tests/series/test_quantile.py
@@ -4,11 +4,12 @@
import numpy as np
import pytest
+from pandas.core.dtypes.common import is_integer
+
import pandas as pd
-import pandas.util.testing as tm
from pandas import Index, Series
-from pandas.core.dtypes.common import is_integer
from pandas.core.indexes.datetimes import Timestamp
+import pandas.util.testing as tm
from .common import TestData
diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py
index afe1155932ea4..40a30cc8cf09a 100644
--- a/pandas/tests/series/test_rank.py
+++ b/pandas/tests/series/test_rank.py
@@ -3,17 +3,19 @@
from itertools import chain
import numpy as np
-import pytest
from numpy import nan
+import pytest
-import pandas.util._test_decorators as td
-import pandas.util.testing as tm
-from pandas import NaT, Series, Timestamp, compat, date_range
from pandas._libs.algos import Infinity, NegInfinity
from pandas._libs.tslib import iNaT
-from pandas.api.types import CategoricalDtype
+import pandas.compat as compat
from pandas.compat import product
+import pandas.util._test_decorators as td
+
+from pandas import NaT, Series, Timestamp, date_range
+from pandas.api.types import CategoricalDtype
from pandas.tests.series.common import TestData
+import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
diff --git a/pandas/tests/series/test_replace.py b/pandas/tests/series/test_replace.py
index 54318aa9a4a34..67c75f43e030c 100644
--- a/pandas/tests/series/test_replace.py
+++ b/pandas/tests/series/test_replace.py
@@ -4,8 +4,9 @@
import numpy as np
import pytest
-import pandas as pd
import pandas._libs.lib as lib
+
+import pandas as pd
import pandas.util.testing as tm
from .common import TestData
diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py
index 8e89dd0f44a4d..ef96274746655 100644
--- a/pandas/tests/series/test_repr.py
+++ b/pandas/tests/series/test_repr.py
@@ -1,20 +1,21 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-import sys
from datetime import datetime, timedelta
+import sys
import numpy as np
+import pandas.compat as compat
+from pandas.compat import lrange, range, u
+
import pandas as pd
-import pandas.util.testing as tm
from pandas import (
- Categorical, DataFrame, Index, Series, compat, date_range, option_context,
- period_range, timedelta_range
-)
-from pandas.compat import lrange, range, u
+ Categorical, DataFrame, Index, Series, date_range, option_context,
+ period_range, timedelta_range)
from pandas.core.base import StringMixin
from pandas.core.index import MultiIndex
+import pandas.util.testing as tm
from .common import TestData
diff --git a/pandas/tests/series/test_sorting.py b/pandas/tests/series/test_sorting.py
index f15160d9ebab6..ef6998c1a3e12 100644
--- a/pandas/tests/series/test_sorting.py
+++ b/pandas/tests/series/test_sorting.py
@@ -5,8 +5,8 @@
import numpy as np
import pytest
-import pandas.util.testing as tm
from pandas import Categorical, DataFrame, IntervalIndex, MultiIndex, Series
+import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_series_equal
from .common import TestData
diff --git a/pandas/tests/series/test_subclass.py b/pandas/tests/series/test_subclass.py
index f48b4e6a7510f..68a162ee4c287 100644
--- a/pandas/tests/series/test_subclass.py
+++ b/pandas/tests/series/test_subclass.py
@@ -3,8 +3,8 @@
import numpy as np
import pandas as pd
-import pandas.util.testing as tm
from pandas import SparseDtype
+import pandas.util.testing as tm
class TestSeriesSubclassing(object):
diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py
index b46570fcfb1a5..21f80f181c34d 100644
--- a/pandas/tests/series/test_timeseries.py
+++ b/pandas/tests/series/test_timeseries.py
@@ -6,23 +6,23 @@
import numpy as np
import pytest
-import pandas as pd
+from pandas._libs.tslib import iNaT
+from pandas.compat import StringIO, lrange, product
+from pandas.errors import NullFrequencyError
import pandas.util._test_decorators as td
-import pandas.util.testing as tm
+
+import pandas as pd
from pandas import (
DataFrame, Index, NaT, Series, Timestamp, concat, date_range, offsets,
- timedelta_range, to_datetime
-)
-from pandas._libs.tslib import iNaT
-from pandas.compat import StringIO, lrange, product
+ timedelta_range, to_datetime)
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
-from pandas.errors import NullFrequencyError
from pandas.tests.series.common import TestData
-from pandas.tseries.offsets import BDay, BMonthEnd
+import pandas.util.testing as tm
from pandas.util.testing import (
- assert_almost_equal, assert_frame_equal, assert_series_equal
-)
+ assert_almost_equal, assert_frame_equal, assert_series_equal)
+
+from pandas.tseries.offsets import BDay, BMonthEnd
def _simple_ts(start, end, freq='D'):
diff --git a/pandas/tests/series/test_timezones.py b/pandas/tests/series/test_timezones.py
index 0b65af6831d07..3c9701758f12c 100644
--- a/pandas/tests/series/test_timezones.py
+++ b/pandas/tests/series/test_timezones.py
@@ -4,16 +4,17 @@
"""
from datetime import datetime
+from dateutil.tz import tzoffset
import numpy as np
import pytest
import pytz
-from dateutil.tz import tzoffset
-import pandas.util.testing as tm
-from pandas import DatetimeIndex, Index, NaT, Series, Timestamp
from pandas._libs.tslibs import conversion, timezones
from pandas.compat import lrange
+
+from pandas import DatetimeIndex, Index, NaT, Series, Timestamp
from pandas.core.indexes.datetimes import date_range
+import pandas.util.testing as tm
class TestSeriesTimezones(object):
diff --git a/pandas/tests/tslibs/test_liboffsets.py b/pandas/tests/tslibs/test_liboffsets.py
index 50d8f546d8e58..388df6453634e 100644
--- a/pandas/tests/tslibs/test_liboffsets.py
+++ b/pandas/tests/tslibs/test_liboffsets.py
@@ -7,9 +7,10 @@
import pytest
import pandas._libs.tslibs.offsets as liboffsets
-from pandas import Timestamp
from pandas._libs.tslibs.offsets import roll_qtrday
+from pandas import Timestamp
+
def test_get_lastbday():
dt = datetime(2017, 11, 30)
diff --git a/pandas/tests/tslibs/test_timedeltas.py b/pandas/tests/tslibs/test_timedeltas.py
index 939c2b828a75f..50e64bb7c2082 100644
--- a/pandas/tests/tslibs/test_timedeltas.py
+++ b/pandas/tests/tslibs/test_timedeltas.py
@@ -2,9 +2,10 @@
import numpy as np
import pytest
-import pandas as pd
from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds
+import pandas as pd
+
def test_delta_to_nanoseconds():
obj = np.timedelta64(14, 'D')
diff --git a/pandas/tseries/api.py b/pandas/tseries/api.py
index 982a0e715e360..2094791ecdc60 100644
--- a/pandas/tseries/api.py
+++ b/pandas/tseries/api.py
@@ -4,5 +4,5 @@
# flake8: noqa
-import pandas.tseries.offsets as offsets
from pandas.tseries.frequencies import infer_freq
+import pandas.tseries.offsets as offsets
diff --git a/setup.cfg b/setup.cfg
index e1d0dc84f464a..1cfefa1bbaadd 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -77,11 +77,17 @@ exclude_lines =
directory = coverage_html_report
[isort]
+known_pre_core=pandas._libs,pandas.util._*,pandas.compat,pandas.errors
+known_dtypes=pandas.core.dtypes
+known_post_core=pandas.tseries,pandas.io,pandas.plotting
+sections=FUTURE,STDLIB,THIRDPARTY,PRE_CORE,DTYPES,FIRSTPARTY,POST_CORE,LOCALFOLDER
+
known_first_party=pandas
-known_third_party=Cython,numpy,python-dateutil,pytz
-multi_line_output=5
+known_third_party=Cython,numpy,python-dateutil,pytz,pyarrow
+multi_line_output=4
force_grid_wrap=0
combine_as_imports=True
+force_sort_within_sections=True
skip=
pandas/lib.py,
pandas/tslib.py,
| (not sure which tag(s) skip the CI)
I propose we change the isort settings to arrange within-pandas imports by dependency structure. i.e.
- `pandas._libs`, `pandas.compat`, `pandas.util._*`, `pandas.errors` are for the most part not dependent on pandas.core, so get their own section(s)
- `pandas.core.dtypes` for the most part does not depend on the rest of `pandas.core`, so it goes next
- then the rest of `pandas.core`
- then pandas non-core: `pandas.io`, `pandas.plotting`, `pandas.tseries`
Within blocks I propose we stick to alphabetical ordering. e.g. right now in `core.series` we have `import pandas.core.indexes.base as ibase` 35 lines away from all the other `pandas.core.indexes` imports. These should be adjacent.
The main thing I'd still like to fix/improve is that in this PR isort is failing to put `from pandas import compat` with the other `pandas.compat` imports.
Putting the trailing parentheses on the same line as the imports instead of a separate line is less important, but I prefer it because it makes it cleaner when I wrap lines in sublimetext. Happy to revert that if it is a sticking point. | https://api.github.com/repos/pandas-dev/pandas/pulls/23366 | 2018-10-26T19:42:01Z | 2018-10-29T12:09:38Z | 2018-10-29T12:09:38Z | 2018-10-30T22:48:13Z |
Fix import format at pandas/tests/extension directory | diff --git a/pandas/tests/extension/arrow/bool.py b/pandas/tests/extension/arrow/bool.py
index d595879e3cb7d..e6f997b01aad2 100644
--- a/pandas/tests/extension/arrow/bool.py
+++ b/pandas/tests/extension/arrow/bool.py
@@ -10,9 +10,10 @@
import numpy as np
import pyarrow as pa
+
import pandas as pd
from pandas.api.extensions import (
- ExtensionDtype, ExtensionArray, take, register_extension_dtype
+ ExtensionArray, ExtensionDtype, register_extension_dtype, take
)
diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py
index 5a01533cfc564..f259e66e6cc76 100644
--- a/pandas/tests/extension/arrow/test_bool.py
+++ b/pandas/tests/extension/arrow/test_bool.py
@@ -1,12 +1,13 @@
import numpy as np
import pytest
+
import pandas as pd
import pandas.util.testing as tm
from pandas.tests.extension import base
pytest.importorskip('pyarrow', minversion="0.10.0")
-from .bool import ArrowBoolDtype, ArrowBoolArray
+from .bool import ArrowBoolArray, ArrowBoolDtype
@pytest.fixture
@@ -17,7 +18,7 @@ def dtype():
@pytest.fixture
def data():
return ArrowBoolArray.from_scalars(np.random.randint(0, 2, size=100,
- dtype=bool))
+ dtype=bool))
@pytest.fixture
diff --git a/pandas/tests/extension/base/dtype.py b/pandas/tests/extension/base/dtype.py
index d5cf9571e3622..e9d1f183812cc 100644
--- a/pandas/tests/extension/base/dtype.py
+++ b/pandas/tests/extension/base/dtype.py
@@ -1,6 +1,7 @@
import warnings
import numpy as np
+
import pandas as pd
from .base import BaseExtensionTests
diff --git a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py
index 22b21102fa4ae..00bb3b5d4eec2 100644
--- a/pandas/tests/extension/base/getitem.py
+++ b/pandas/tests/extension/base/getitem.py
@@ -1,5 +1,5 @@
-import pytest
import numpy as np
+import pytest
import pandas as pd
import pandas.util.testing as tm
diff --git a/pandas/tests/extension/base/groupby.py b/pandas/tests/extension/base/groupby.py
index 52c635d286df6..dd406ca0cd5ed 100644
--- a/pandas/tests/extension/base/groupby.py
+++ b/pandas/tests/extension/base/groupby.py
@@ -1,7 +1,8 @@
import pytest
-import pandas.util.testing as tm
import pandas as pd
+import pandas.util.testing as tm
+
from .base import BaseExtensionTests
diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index dce91d5a9ca9c..f7649415f2471 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -1,5 +1,5 @@
-import pytest
import numpy as np
+import pytest
import pandas as pd
import pandas.util.testing as tm
diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py
index 7baa6284e398f..2161214190be0 100644
--- a/pandas/tests/extension/base/ops.py
+++ b/pandas/tests/extension/base/ops.py
@@ -1,9 +1,10 @@
-import pytest
-
import operator
+import pytest
+
import pandas as pd
from pandas.core import ops
+
from .base import BaseExtensionTests
diff --git a/pandas/tests/extension/base/reduce.py b/pandas/tests/extension/base/reduce.py
index 4f6c7988314c0..c4b70f2013265 100644
--- a/pandas/tests/extension/base/reduce.py
+++ b/pandas/tests/extension/base/reduce.py
@@ -1,7 +1,10 @@
import warnings
+
import pytest
-import pandas.util.testing as tm
+
import pandas as pd
+import pandas.util.testing as tm
+
from .base import BaseExtensionTests
diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py
index 7f13c2cd67373..446912b66bf33 100644
--- a/pandas/tests/extension/base/reshaping.py
+++ b/pandas/tests/extension/base/reshaping.py
@@ -1,5 +1,5 @@
-import pytest
import numpy as np
+import pytest
import pandas as pd
from pandas.core.internals import ExtensionBlock
diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index 029a77acb121f..72316b5b7eb91 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -5,6 +5,7 @@
import pandas as pd
import pandas.util.testing as tm
+
from .base import BaseExtensionTests
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index fe07aae61c5e2..e47b8049ae65c 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -6,8 +6,7 @@
import numpy as np
import pandas as pd
-from pandas.core.arrays import (ExtensionArray,
- ExtensionScalarOpsMixin)
+from pandas.core.arrays import ExtensionArray, ExtensionScalarOpsMixin
from pandas.core.dtypes.base import ExtensionDtype
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index be1c61166e4b1..59e77c3f0f3f3 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -1,15 +1,15 @@
-import operator
import decimal
+import operator
import numpy as np
-import pandas as pd
-from pandas import compat
-import pandas.util.testing as tm
import pytest
+import pandas as pd
+import pandas.util.testing as tm
+from pandas import compat
from pandas.tests.extension import base
-from .array import DecimalDtype, DecimalArray, make_data, to_decimal
+from .array import DecimalArray, DecimalDtype, make_data, to_decimal
@pytest.fixture
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 5c63e50c3eaaa..75b0f6b02edad 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -20,8 +20,8 @@
import numpy as np
from pandas import compat
-from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.arrays import ExtensionArray
+from pandas.core.dtypes.base import ExtensionDtype
class JSONDtype(ExtensionDtype):
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 15d99f6c5d2fc..d6528e3085527 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -1,5 +1,5 @@
-import operator
import collections
+import operator
import pytest
diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py
index a4518798aa400..b5ded1890ae64 100644
--- a/pandas/tests/extension/test_categorical.py
+++ b/pandas/tests/extension/test_categorical.py
@@ -15,14 +15,14 @@
"""
import string
-import pytest
-import pandas as pd
import numpy as np
+import pytest
-from pandas.api.types import CategoricalDtype
+import pandas as pd
+import pandas.util.testing as tm
from pandas import Categorical
+from pandas.api.types import CategoricalDtype
from pandas.tests.extension import base
-import pandas.util.testing as tm
def make_data():
diff --git a/pandas/tests/extension/test_common.py b/pandas/tests/extension/test_common.py
index a0a8f86a5d7b5..8a026a0c38283 100644
--- a/pandas/tests/extension/test_common.py
+++ b/pandas/tests/extension/test_common.py
@@ -4,8 +4,8 @@
import pandas as pd
import pandas.util.testing as tm
from pandas.core.arrays import ExtensionArray
-from pandas.core.dtypes.common import is_extension_array_dtype
from pandas.core.dtypes import dtypes
+from pandas.core.dtypes.common import is_extension_array_dtype
class DummyDtype(dtypes.ExtensionDtype):
diff --git a/pandas/tests/extension/test_external_block.py b/pandas/tests/extension/test_external_block.py
index aa32bf6051617..1b3f285e64059 100644
--- a/pandas/tests/extension/test_external_block.py
+++ b/pandas/tests/extension/test_external_block.py
@@ -2,14 +2,12 @@
# pylint: disable=W0102
import numpy as np
+import pytest
import pandas as pd
-from pandas.core.internals import (
- BlockManager, SingleBlockManager)
+from pandas.core.internals import BlockManager, SingleBlockManager
from pandas.core.internals.blocks import Block, NonConsolidatableMixIn
-import pytest
-
class CustomBlock(NonConsolidatableMixIn, Block):
diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py
index 26b09d545378b..680cf83c64b9e 100644
--- a/pandas/tests/extension/test_integer.py
+++ b/pandas/tests/extension/test_integer.py
@@ -14,16 +14,16 @@
"""
import numpy as np
-import pandas as pd
import pytest
-from pandas.tests.extension import base
-from pandas.core.dtypes.common import is_extension_array_dtype
-
+import pandas as pd
from pandas.core.arrays import integer_array
from pandas.core.arrays.integer import (
- Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype,
- UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype)
+ Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, UInt8Dtype, UInt16Dtype,
+ UInt32Dtype, UInt64Dtype
+)
+from pandas.core.dtypes.common import is_extension_array_dtype
+from pandas.tests.extension import base
def make_data():
diff --git a/pandas/tests/extension/test_interval.py b/pandas/tests/extension/test_interval.py
index 183ebea927b10..3ffb88ef302e9 100644
--- a/pandas/tests/extension/test_interval.py
+++ b/pandas/tests/extension/test_interval.py
@@ -13,14 +13,14 @@
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
-import pytest
import numpy as np
+import pytest
+import pandas.util.testing as tm
from pandas import Interval
from pandas.core.arrays import IntervalArray
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas.tests.extension import base
-import pandas.util.testing as tm
def make_data():
diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index 6946da6fdc36d..8c038b3950a26 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -1,11 +1,11 @@
-import pytest
-import pandas as pd
import numpy as np
+import pytest
+import pandas as pd
+import pandas.util.testing as tm
from pandas import SparseArray, SparseDtype
from pandas.errors import PerformanceWarning
from pandas.tests.extension import base
-import pandas.util.testing as tm
def make_data(fill_value):
diff --git a/setup.cfg b/setup.cfg
index 162c6983f9e29..e1d0dc84f464a 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -246,6 +246,7 @@ skip=
pandas/tests/indexes/timedeltas/test_partial_slicing.py,
pandas/tests/indexes/timedeltas/test_timedelta_range.py,
pandas/tests/indexes/timedeltas/test_ops.py,
+ pandas/tests/series/test_duplicates.py,
pandas/tests/series/indexing/test_callable.py,
pandas/tests/arrays/test_datetimelike.py,
pandas/tests/arrays/test_integer.py,
@@ -255,29 +256,6 @@ skip=
pandas/tests/util/test_testing.py,
pandas/tests/util/test_util.py,
pandas/tests/util/test_hashing.py,
- pandas/tests/series/test_duplicates.py
- pandas/tests/extension/test_common.py,
- pandas/tests/extension/test_integer.py,
- pandas/tests/extension/test_external_block.py,
- pandas/tests/extension/test_interval.py,
- pandas/tests/extension/test_categorical.py,
- pandas/tests/extension/base/ops.py,
- pandas/tests/extension/base/reshaping.py,
- pandas/tests/extension/base/getitem.py,
- pandas/tests/extension/base/groupby.py,
- pandas/tests/extension/base/constructors.py,
- pandas/tests/extension/base/interface.py,
- pandas/tests/extension/base/dtype.py,
- pandas/tests/extension/base/casting.py,
- pandas/tests/extension/base/methods.py,
- pandas/tests/extension/base/missing.py,
- pandas/tests/extension/base/setitem.py,
- pandas/tests/extension/arrow/test_bool.py,
- pandas/tests/extension/arrow/bool.py,
- pandas/tests/extension/decimal/array.py,
- pandas/tests/extension/decimal/test_decimal.py,
- pandas/tests/extension/json/array.py,
- pandas/tests/extension/json/test_json.py,
pandas/tests/io/test_clipboard.py,
pandas/tests/io/test_compression.py,
pandas/tests/io/test_pytables.py,
@@ -483,3 +461,4 @@ skip=
pandas/plotting/_misc.py,
pandas/types/common.py,
pandas/plotting/_compat.py,
+ pandas/tests/extension/arrow/test_bool.py
| - [x] partial #23334
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
PR includes 21 files, but I think it's acceptable and unnecessary to divide it to two separate PRs.
| https://api.github.com/repos/pandas-dev/pandas/pulls/23365 | 2018-10-26T19:39:17Z | 2018-10-28T13:38:51Z | 2018-10-28T13:38:51Z | 2018-10-30T18:49:52Z |
Isort contributing guide | diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 66d545a0de6e9..3ec505998fde0 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -612,6 +612,54 @@ Alternatively, you can install the ``grep`` and ``xargs`` commands via the
`MinGW <http://www.mingw.org/>`__ toolchain, and it will allow you to run the
commands above.
+.. _contributing.import-formatting:
+
+Import Formatting
+~~~~~~~~~~~~~~~~~
+*pandas* uses `isort <https://pypi.org/project/isort/>`__ to standardise import
+formatting across the codebase.
+
+A guide to import layout as per pep8 can be found `here <https://www.python.org/dev/peps/pep-0008/#imports/>`__.
+
+A summary of our current import sections ( in order ):
+
+* Future
+* Python Standard Library
+* Third Party
+* ``pandas._libs``, ``pandas.compat``, ``pandas.util._*``, ``pandas.errors`` (largely not dependent on ``pandas.core``)
+* ``pandas.core.dtypes`` (largely not dependent on the rest of ``pandas.core``)
+* Rest of ``pandas.core.*``
+* Non-core ``pandas.io``, ``pandas.plotting``, ``pandas.tseries``
+* Local application/library specific imports
+
+Imports are alphabetically sorted within these sections.
+
+
+As part of :ref:`Continuous Integration <contributing.ci>` checks we run::
+
+ isort --recursive --check-only pandas
+
+to check that imports are correctly formatted as per the `setup.cfg`.
+
+If you see output like the below in :ref:`Continuous Integration <contributing.ci>` checks:
+
+.. code-block:: shell
+
+ Check import format using isort
+ ERROR: /home/travis/build/pandas-dev/pandas/pandas/io/pytables.py Imports are incorrectly sorted
+ Check import format using isort DONE
+ The command "ci/code_checks.sh" exited with 1
+
+You should run::
+
+ isort pandas/io/pytables.py
+
+to automatically format imports correctly. This will modify your local copy of the files.
+
+The `--recursive` flag can be passed to sort all files in a directory.
+
+You can then verify the changes look ok, then git :ref:`commit <contributing.commit-code>` and :ref:`push <contributing.push-code>`.
+
Backwards Compatibility
~~~~~~~~~~~~~~~~~~~~~~~
@@ -1078,6 +1126,8 @@ or a new keyword argument (`example <https://github.com/pandas-dev/pandas/blob/v
Contributing your changes to *pandas*
=====================================
+.. _contributing.commit-code:
+
Committing your code
--------------------
@@ -1122,6 +1172,8 @@ Now you can commit your changes in your local repository::
git commit -m
+.. _contributing.push-code:
+
Pushing your changes
--------------------
diff --git a/setup.cfg b/setup.cfg
index e0f1ea6fe3ac9..8a3cfd1551a46 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -76,6 +76,7 @@ exclude_lines =
[coverage:html]
directory = coverage_html_report
+# To be kept consistent with "Import Formatting" section in contributing.rst
[isort]
known_pre_core=pandas._libs,pandas.util._*,pandas.compat,pandas.errors
known_dtypes=pandas.core.dtypes
| - [x] follow up from #23096
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/23364 | 2018-10-26T16:54:43Z | 2018-11-04T15:30:44Z | 2018-11-04T15:30:44Z | 2018-11-04T15:30:47Z |
COMPAT: update for latest flake8 | diff --git a/pandas/core/groupby/__init__.py b/pandas/core/groupby/__init__.py
index 6f90fd1cff7e6..9c15a5ebfe0f2 100644
--- a/pandas/core/groupby/__init__.py
+++ b/pandas/core/groupby/__init__.py
@@ -1,4 +1,4 @@
-from pandas.core.groupby.groupby import GroupBy # flake8: noqa
-from pandas.core.groupby.generic import (
- SeriesGroupBy, DataFrameGroupBy, PanelGroupBy) # flake8: noqa
-from pandas.core.groupby.grouper import Grouper # flake8: noqa
+from pandas.core.groupby.groupby import GroupBy # noqa: F401
+from pandas.core.groupby.generic import ( # noqa: F401
+ SeriesGroupBy, DataFrameGroupBy, PanelGroupBy)
+from pandas.core.groupby.grouper import Grouper # noqa: F401
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 44163479dfd27..8871cac6f6af6 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -762,7 +762,7 @@ def ensure_clean(filename=None, return_filelike=False):
finally:
try:
os.close(fd)
- except Exception as e:
+ except Exception:
print("Couldn't close file descriptor: {fdesc} (file: {fname})"
.format(fdesc=fd, fname=filename))
try:
| https://api.github.com/repos/pandas-dev/pandas/pulls/23363 | 2018-10-26T15:10:45Z | 2018-10-28T02:46:07Z | 2018-10-28T02:46:07Z | 2018-10-28T02:46:12Z | |
unpin openpyxl | diff --git a/ci/azure-36-locale_slow.yaml b/ci/azure-36-locale_slow.yaml
index 14b23dd6f3e4c..7e40bd1a9979e 100644
--- a/ci/azure-36-locale_slow.yaml
+++ b/ci/azure-36-locale_slow.yaml
@@ -14,7 +14,7 @@ dependencies:
- nomkl
- numexpr
- numpy
- - openpyxl=2.5.5
+ - openpyxl
- psycopg2
- pymysql
- pytables
diff --git a/ci/azure-37-locale.yaml b/ci/azure-37-locale.yaml
index ef97b85406709..59c8818eaef1e 100644
--- a/ci/azure-37-locale.yaml
+++ b/ci/azure-37-locale.yaml
@@ -13,7 +13,7 @@ dependencies:
- nomkl
- numexpr
- numpy
- - openpyxl=2.5.5
+ - openpyxl
- psycopg2
- pymysql
- pytables
diff --git a/ci/azure-macos-35.yaml b/ci/azure-macos-35.yaml
index 6ccdc79d11b27..065deb914dae6 100644
--- a/ci/azure-macos-35.yaml
+++ b/ci/azure-macos-35.yaml
@@ -12,7 +12,7 @@ dependencies:
- nomkl
- numexpr
- numpy=1.12.0
- - openpyxl=2.5.5
+ - openpyxl
- pytables
- python=3.5*
- pytz
diff --git a/ci/azure-windows-27.yaml b/ci/azure-windows-27.yaml
index d48a9ba986a93..dc68129a5e6d3 100644
--- a/ci/azure-windows-27.yaml
+++ b/ci/azure-windows-27.yaml
@@ -13,7 +13,7 @@ dependencies:
- matplotlib=2.0.1
- numexpr
- numpy=1.12*
- - openpyxl=2.5.5
+ - openpyxl
- pytables
- python=2.7.*
- pytz
diff --git a/ci/azure-windows-36.yaml b/ci/azure-windows-36.yaml
index d03a6cbbd662c..979443661f99b 100644
--- a/ci/azure-windows-36.yaml
+++ b/ci/azure-windows-36.yaml
@@ -11,7 +11,7 @@ dependencies:
- matplotlib
- numexpr
- numpy=1.14*
- - openpyxl=2.5.5
+ - openpyxl
- parquet-cpp
- pyarrow
- pytables
diff --git a/ci/circle-36-locale.yaml b/ci/circle-36-locale.yaml
index ef97b85406709..59c8818eaef1e 100644
--- a/ci/circle-36-locale.yaml
+++ b/ci/circle-36-locale.yaml
@@ -13,7 +13,7 @@ dependencies:
- nomkl
- numexpr
- numpy
- - openpyxl=2.5.5
+ - openpyxl
- psycopg2
- pymysql
- pytables
diff --git a/ci/requirements-optional-conda.txt b/ci/requirements-optional-conda.txt
index e9afd7a551b6e..04abfede67163 100644
--- a/ci/requirements-optional-conda.txt
+++ b/ci/requirements-optional-conda.txt
@@ -12,7 +12,7 @@ lxml
matplotlib>=2.0.0
nbsphinx
numexpr>=2.6.1
-openpyxl=2.5.5
+openpyxl
pyarrow
pymysql
pytables>=3.4.2
diff --git a/ci/requirements-optional-pip.txt b/ci/requirements-optional-pip.txt
index ebe0c4ca88ee6..0153bdb6edf04 100644
--- a/ci/requirements-optional-pip.txt
+++ b/ci/requirements-optional-pip.txt
@@ -14,7 +14,7 @@ lxml
matplotlib>=2.0.0
nbsphinx
numexpr>=2.6.1
-openpyxl==2.5.5
+openpyxl
pyarrow
pymysql
tables
diff --git a/ci/travis-36-doc.yaml b/ci/travis-36-doc.yaml
index 8353659e7b9a9..f1f64546374af 100644
--- a/ci/travis-36-doc.yaml
+++ b/ci/travis-36-doc.yaml
@@ -22,7 +22,7 @@ dependencies:
- notebook
- numexpr
- numpy=1.13*
- - openpyxl=2.5.5
+ - openpyxl
- pandoc
- pyqt
- pytables
diff --git a/ci/travis-36-slow.yaml b/ci/travis-36-slow.yaml
index 1a7bc53e1b74b..3157ecac3a902 100644
--- a/ci/travis-36-slow.yaml
+++ b/ci/travis-36-slow.yaml
@@ -10,7 +10,7 @@ dependencies:
- matplotlib
- numexpr
- numpy
- - openpyxl=2.5.5
+ - openpyxl
- patsy
- psycopg2
- pymysql
diff --git a/ci/travis-36.yaml b/ci/travis-36.yaml
index 7aa27beacf976..257f830ec6c48 100644
--- a/ci/travis-36.yaml
+++ b/ci/travis-36.yaml
@@ -21,7 +21,7 @@ dependencies:
- nomkl
- numexpr
- numpy
- - openpyxl=2.5.5
+ - openpyxl
- psycopg2
- pyarrow
- pymysql
| - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- Follow up from #22601.
- Confirmed with openpyxl this bug was fixed in version 2.5.7 see [bitbucket issue here](https://bitbucket.org/openpyxl/openpyxl/issues/1093/newly-introduced-keyerror-problem-in)
This was release 13th Sept -> https://pypi.org/project/openpyxl/#history
| https://api.github.com/repos/pandas-dev/pandas/pulls/23361 | 2018-10-26T14:02:04Z | 2018-10-28T20:14:13Z | 2018-10-28T20:14:13Z | 2018-11-01T21:59:30Z |
TST: Fix test assertion | diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index 6ebe3cb7487ab..029a77acb121f 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -27,6 +27,7 @@ def test_setitem_sequence(self, data, box_in_series):
@pytest.mark.parametrize('as_array', [True, False])
def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
ser = pd.Series(data)
+ original = ser.copy()
value = [data[0]]
if as_array:
value = data._from_sequence(value)
@@ -34,11 +35,12 @@ def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
xpr = 'cannot set using a {} indexer with a different length'
with tm.assert_raises_regex(ValueError, xpr.format('list-like')):
ser[[0, 1]] = value
- assert ser._values[[0, 1]] == value
+ # Ensure no modifications made before the exception
+ self.assert_series_equal(ser, original)
with tm.assert_raises_regex(ValueError, xpr.format('slice')):
ser[slice(3)] = value
- assert ser._values[slice(3)] == value
+ self.assert_series_equal(ser, original)
def test_setitem_empty_indxer(self, data, box_in_series):
if box_in_series:
| xref https://github.com/pandas-dev/pandas/pull/23304#discussion_r228458689
cc @jorisvandenbossche | https://api.github.com/repos/pandas-dev/pandas/pulls/23357 | 2018-10-26T11:14:58Z | 2018-10-26T12:24:38Z | 2018-10-26T12:24:38Z | 2018-10-26T21:39:43Z |
DOC: fixup whatsnew note for GH21394 | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 51c398518c153..1b17b654e2130 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -201,7 +201,6 @@ Other Enhancements
- :func:`~DataFrame.to_csv`, :func:`~Series.to_csv`, :func:`~DataFrame.to_json`, and :func:`~Series.to_json` now support ``compression='infer'`` to infer compression based on filename extension (:issue:`15008`).
The default compression for ``to_csv``, ``to_json``, and ``to_pickle`` methods has been updated to ``'infer'`` (:issue:`22004`).
- :func:`to_timedelta` now supports iso-formated timedelta strings (:issue:`21877`)
-- Comparing :class:`Timedelta` with unknown types now return ``NotImplemented`` instead of ``False`` (:issue:`20829`)
- :class:`Series` and :class:`DataFrame` now support :class:`Iterable` in constructor (:issue:`2193`)
- :class:`DatetimeIndex` gained :attr:`DatetimeIndex.timetz` attribute. Returns local time with timezone information. (:issue:`21358`)
- :meth:`round`, :meth:`ceil`, and meth:`floor` for :class:`DatetimeIndex` and :class:`Timestamp` now support an ``ambiguous`` argument for handling datetimes that are rounded to ambiguous times (:issue:`18946`)
@@ -910,6 +909,7 @@ Other API Changes
has an improved ``KeyError`` message, and will not fail on duplicate column names with ``drop=True``. (:issue:`22484`)
- Slicing a single row of a DataFrame with multiple ExtensionArrays of the same type now preserves the dtype, rather than coercing to object (:issue:`22784`)
- :class:`DateOffset` attribute `_cacheable` and method `_should_cache` have been removed (:issue:`23118`)
+- Comparing :class:`Timedelta` to be less or greater than unknown types now raises a ``TypeError`` instead of returning ``False`` (:issue:`20829`)
- :meth:`Index.hasnans` and :meth:`Series.hasnans` now always return a python boolean. Previously, a python or a numpy boolean could be returned, depending on circumstances (:issue:`23294`).
.. _whatsnew_0240.deprecations:
| xref https://github.com/pandas-dev/pandas/pull/21394
(also moved from enhancements to api changes) | https://api.github.com/repos/pandas-dev/pandas/pulls/23355 | 2018-10-26T08:34:33Z | 2018-10-28T13:39:26Z | 2018-10-28T13:39:26Z | 2018-10-28T17:44:06Z |
BUG: Fix IntervalTree handling of NaN | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 768868d585721..b6c9ef52c1abe 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -1094,6 +1094,7 @@ Interval
- Bug in the ``IntervalIndex`` repr where a trailing comma was missing after the list of intervals (:issue:`20611`)
- Bug in :class:`Interval` where scalar arithmetic operations did not retain the ``closed`` value (:issue:`22313`)
- Bug in :class:`IntervalIndex` where indexing with datetime-like values raised a ``KeyError`` (:issue:`20636`)
+- Bug in ``IntervalTree`` where data containing ``NaN`` triggered a warning and resulted in incorrect indexing queries with :class:`IntervalIndex` (:issue:`23352`)
Indexing
^^^^^^^^
diff --git a/pandas/_libs/intervaltree.pxi.in b/pandas/_libs/intervaltree.pxi.in
index 9ed76242a95c3..875848c00311f 100644
--- a/pandas/_libs/intervaltree.pxi.in
+++ b/pandas/_libs/intervaltree.pxi.in
@@ -72,6 +72,12 @@ cdef class IntervalTree(IntervalMixin):
self.closed = closed
+ # GH 23352: ensure no nan in nodes
+ mask = ~np.isnan(self.left)
+ self.left = self.left[mask]
+ self.right = self.right[mask]
+ indices = indices[mask]
+
node_cls = NODE_CLASSES[str(self.dtype), closed]
self.root = node_cls(self.left, self.right, indices, leaf_size)
diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py
index 5f248bf7725e5..001a5109f7fc3 100644
--- a/pandas/tests/indexes/interval/test_interval_tree.py
+++ b/pandas/tests/indexes/interval/test_interval_tree.py
@@ -13,10 +13,22 @@ def dtype(request):
return request.param
-@pytest.fixture(scope='class')
-def tree(dtype):
- left = np.arange(5, dtype=dtype)
- return IntervalTree(left, left + 2)
+@pytest.fixture(params=[1, 2, 10])
+def leaf_size(request):
+ return request.param
+
+
+@pytest.fixture(params=[
+ np.arange(5, dtype='int64'),
+ np.arange(5, dtype='int32'),
+ np.arange(5, dtype='uint64'),
+ np.arange(5, dtype='float64'),
+ np.arange(5, dtype='float32'),
+ np.array([0, 1, 2, 3, 4, np.nan], dtype='float64'),
+ np.array([0, 1, 2, 3, 4, np.nan], dtype='float32')])
+def tree(request, leaf_size):
+ left = request.param
+ return IntervalTree(left, left + 2, leaf_size=leaf_size)
class TestIntervalTree(object):
| - [X] closes #23352
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
xref #23327 : As far as I can tell the only reason that PR is failing is because of tests raising a `RuntimeWarning` due to this bug.
| https://api.github.com/repos/pandas-dev/pandas/pulls/23353 | 2018-10-26T04:37:12Z | 2018-10-30T12:52:24Z | 2018-10-30T12:52:24Z | 2018-10-31T22:44:55Z |
Fix import format pandas/tests/indexing | diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py
index 127548bdaf106..1fd9bf86ff6ee 100644
--- a/pandas/tests/indexing/common.py
+++ b/pandas/tests/indexing/common.py
@@ -2,15 +2,18 @@
import itertools
from warnings import catch_warnings, filterwarnings
-import pytest
+
import numpy as np
+import pytest
+from pandas import (
+ DataFrame, Float64Index, MultiIndex, Panel, Series, UInt64Index,
+ date_range
+)
from pandas.compat import lrange
from pandas.core.dtypes.common import is_scalar
-from pandas import (Series, DataFrame, Panel, date_range, UInt64Index,
- Float64Index, MultiIndex)
-from pandas.util import testing as tm
from pandas.io.formats.printing import pprint_thing
+from pandas.util import testing as tm
_verbose = False
diff --git a/pandas/tests/indexing/interval/test_interval.py b/pandas/tests/indexing/interval/test_interval.py
index f2f59159032a2..67fa31637fc25 100644
--- a/pandas/tests/indexing/interval/test_interval.py
+++ b/pandas/tests/indexing/interval/test_interval.py
@@ -1,9 +1,9 @@
-import pytest
import numpy as np
-import pandas as pd
+import pytest
-from pandas import Series, DataFrame, IntervalIndex, Interval
+import pandas as pd
import pandas.util.testing as tm
+from pandas import DataFrame, Interval, IntervalIndex, Series
class TestIntervalIndex(object):
diff --git a/pandas/tests/indexing/interval/test_interval_new.py b/pandas/tests/indexing/interval/test_interval_new.py
index 3eb5f38ba0c80..080f2abdad7de 100644
--- a/pandas/tests/indexing/interval/test_interval_new.py
+++ b/pandas/tests/indexing/interval/test_interval_new.py
@@ -1,9 +1,8 @@
-import pytest
import numpy as np
+import pytest
-from pandas import Series, IntervalIndex, Interval
import pandas.util.testing as tm
-
+from pandas import Interval, IntervalIndex, Series
pytestmark = pytest.mark.skip(reason="new indexing tests for issue 16316")
diff --git a/pandas/tests/indexing/test_callable.py b/pandas/tests/indexing/test_callable.py
index 95b406517be62..d8f65c211a115 100644
--- a/pandas/tests/indexing/test_callable.py
+++ b/pandas/tests/indexing/test_callable.py
@@ -2,6 +2,7 @@
# pylint: disable-msg=W0612,E1101
import numpy as np
+
import pandas as pd
import pandas.util.testing as tm
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index 634ad0d8160ed..85f95e79cc003 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -1,17 +1,19 @@
# -*- coding: utf-8 -*-
+import numpy as np
import pytest
import pandas as pd
import pandas.compat as compat
-import numpy as np
-from pandas import (Series, DataFrame, Timestamp, Categorical,
- CategoricalIndex, Interval, Index)
-from pandas.util.testing import assert_series_equal, assert_frame_equal
-from pandas.util import testing as tm
-from pandas.core.dtypes.common import is_categorical_dtype
+from pandas import (
+ Categorical, CategoricalIndex, DataFrame, Index, Interval, Series,
+ Timestamp
+)
from pandas.api.types import CategoricalDtype as CDT
+from pandas.core.dtypes.common import is_categorical_dtype
from pandas.core.dtypes.dtypes import CategoricalDtype
+from pandas.util import testing as tm
+from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestCategoricalIndex(object):
diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py
index a7e55cdf9936e..3762585558082 100644
--- a/pandas/tests/indexing/test_chaining_and_caching.py
+++ b/pandas/tests/indexing/test_chaining_and_caching.py
@@ -1,10 +1,12 @@
+import numpy as np
import pytest
-import numpy as np
import pandas as pd
+from pandas import (
+ DataFrame, MultiIndex, Series, Timestamp, compat, date_range,
+ option_context
+)
from pandas.core import common as com
-from pandas import (compat, DataFrame, option_context,
- Series, MultiIndex, date_range, Timestamp)
from pandas.util import testing as tm
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index 2f44cb36eeb11..643b6081d176a 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -1,13 +1,13 @@
# -*- coding: utf-8 -*-
import itertools
-import pytest
+
import numpy as np
+import pytest
import pandas as pd
-import pandas.util.testing as tm
import pandas.compat as compat
-
+import pandas.util.testing as tm
###############################################################
# Index / Series common tests which may trigger dtype coercions
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py
index df59390475da8..5fd1724f6e95c 100644
--- a/pandas/tests/indexing/test_datetime.py
+++ b/pandas/tests/indexing/test_datetime.py
@@ -1,10 +1,10 @@
from datetime import datetime, timedelta
import numpy as np
-import pandas as pd
from dateutil import tz
-from pandas import date_range, Index, DataFrame, Series, Timestamp
+import pandas as pd
+from pandas import DataFrame, Index, Series, Timestamp, date_range
from pandas.util import testing as tm
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index 3773b432135b9..c7fe9959b048a 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -1,14 +1,15 @@
# -*- coding: utf-8 -*-
-import pytest
-
from warnings import catch_warnings
+
import numpy as np
-from pandas import (Series, DataFrame, Index, Float64Index, Int64Index,
- RangeIndex)
-from pandas.util.testing import assert_series_equal, assert_almost_equal
-import pandas.util.testing as tm
+import pytest
+import pandas.util.testing as tm
+from pandas import (
+ DataFrame, Float64Index, Index, Int64Index, RangeIndex, Series
+)
+from pandas.util.testing import assert_almost_equal, assert_series_equal
ignore_ix = pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 538d9706d54d6..e2b6a37096493 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -1,16 +1,16 @@
""" test positional based indexing with iloc """
-import pytest
-
from warnings import catch_warnings, filterwarnings, simplefilter
+
import numpy as np
+import pytest
import pandas as pd
-from pandas.compat import lrange, lmap
-from pandas import Series, DataFrame, date_range, concat, isna
-from pandas.util import testing as tm
-from pandas.tests.indexing.common import Base
+from pandas import DataFrame, Series, concat, date_range, isna
from pandas.api.types import is_scalar
+from pandas.compat import lmap, lrange
+from pandas.tests.indexing.common import Base
+from pandas.util import testing as tm
class TestiLoc(Base):
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index def0da4fcd6bd..e96e249743468 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -3,28 +3,23 @@
""" test fancy indexing & misc """
-import pytest
-
import weakref
-from warnings import catch_warnings, simplefilter
from datetime import datetime
+from warnings import catch_warnings, simplefilter
-from pandas.core.dtypes.common import (
- is_integer_dtype,
- is_float_dtype)
-from pandas.compat import range, lrange, lzip, StringIO
import numpy as np
+import pytest
import pandas as pd
-from pandas.core.indexing import (_non_reducing_slice, _maybe_numeric_slice,
- validate_indices)
-from pandas import NaT, DataFrame, Index, Series, MultiIndex
import pandas.util.testing as tm
-from pandas.compat import PY2
-
+from pandas import DataFrame, Index, MultiIndex, NaT, Series
+from pandas.compat import PY2, StringIO, lrange, lzip, range
+from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype
+from pandas.core.indexing import (
+ _maybe_numeric_slice, _non_reducing_slice, validate_indices
+)
from pandas.tests.indexing.common import Base, _mklbl
-
# ------------------------------------------------------------------------
# Indexing test cases
diff --git a/pandas/tests/indexing/test_indexing_slow.py b/pandas/tests/indexing/test_indexing_slow.py
index 61e5fdd7b9562..9715fc2bb5946 100644
--- a/pandas/tests/indexing/test_indexing_slow.py
+++ b/pandas/tests/indexing/test_indexing_slow.py
@@ -3,10 +3,11 @@
import warnings
import numpy as np
+import pytest
+
import pandas as pd
-from pandas.core.api import Series, DataFrame, MultiIndex
import pandas.util.testing as tm
-import pytest
+from pandas.core.api import DataFrame, MultiIndex, Series
class TestIndexingSlow(object):
diff --git a/pandas/tests/indexing/test_ix.py b/pandas/tests/indexing/test_ix.py
index 04d0e04b5651e..0e99be31f6b93 100644
--- a/pandas/tests/indexing/test_ix.py
+++ b/pandas/tests/indexing/test_ix.py
@@ -1,17 +1,16 @@
""" test indexing with ix """
-import pytest
-
from warnings import catch_warnings
import numpy as np
-import pandas as pd
+import pytest
-from pandas.core.dtypes.common import is_scalar
+import pandas as pd
+from pandas import DataFrame, MultiIndex, Series, option_context
from pandas.compat import lrange
-from pandas import Series, DataFrame, option_context, MultiIndex
-from pandas.util import testing as tm
+from pandas.core.dtypes.common import is_scalar
from pandas.errors import PerformanceWarning
+from pandas.util import testing as tm
def test_ix_deprecation():
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 4a597967d3d5d..9314c2bcc8415 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -1,18 +1,17 @@
""" test label based indexing with loc """
import itertools
-import pytest
-
from warnings import catch_warnings, filterwarnings
+
import numpy as np
+import pytest
import pandas as pd
-from pandas.compat import lrange, StringIO
-from pandas import Series, DataFrame, Timestamp, date_range, MultiIndex, Index
-from pandas.util import testing as tm
-from pandas.tests.indexing.common import Base
+from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range
from pandas.api.types import is_scalar
-from pandas.compat import PY2
+from pandas.compat import PY2, StringIO, lrange
+from pandas.tests.indexing.common import Base
+from pandas.util import testing as tm
class TestLoc(Base):
diff --git a/pandas/tests/indexing/test_multiindex.py b/pandas/tests/indexing/test_multiindex.py
index b8f80164e5402..42d7b35a0a2fd 100644
--- a/pandas/tests/indexing/test_multiindex.py
+++ b/pandas/tests/indexing/test_multiindex.py
@@ -1,12 +1,15 @@
from warnings import catch_warnings
-import pytest
+
import numpy as np
+import pytest
+
import pandas as pd
-from pandas import (Panel, Series, MultiIndex, DataFrame,
- Timestamp, Index, date_range)
-from pandas.util import testing as tm
+from pandas import (
+ DataFrame, Index, MultiIndex, Panel, Series, Timestamp, date_range
+)
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.tests.indexing.common import _mklbl
+from pandas.util import testing as tm
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
diff --git a/pandas/tests/indexing/test_panel.py b/pandas/tests/indexing/test_panel.py
index 2cd05b5779f30..f8bc2b932df9d 100644
--- a/pandas/tests/indexing/test_panel.py
+++ b/pandas/tests/indexing/test_panel.py
@@ -1,9 +1,10 @@
-import pytest
from warnings import catch_warnings
import numpy as np
+import pytest
+
+from pandas import DataFrame, Panel, date_range
from pandas.util import testing as tm
-from pandas import Panel, date_range, DataFrame
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 5910f462cb3df..0d596e713fcfc 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -4,13 +4,13 @@
TOD: these should be split among the indexer tests
"""
-import pytest
-
from warnings import catch_warnings
+
import numpy as np
+import pytest
import pandas as pd
-from pandas import Series, DataFrame, Panel, Index, date_range
+from pandas import DataFrame, Index, Panel, Series, date_range
from pandas.util import testing as tm
diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py
index 91f006e23e878..d45209fd277f1 100644
--- a/pandas/tests/indexing/test_scalar.py
+++ b/pandas/tests/indexing/test_scalar.py
@@ -1,13 +1,11 @@
""" test scalar indexing, including at and iat """
-import pytest
-
import numpy as np
+import pytest
-from pandas import (Series, DataFrame, Timestamp,
- Timedelta, date_range)
-from pandas.util import testing as tm
+from pandas import DataFrame, Series, Timedelta, Timestamp, date_range
from pandas.tests.indexing.common import Base
+from pandas.util import testing as tm
class TestScalar(Base):
diff --git a/pandas/tests/indexing/test_timedelta.py b/pandas/tests/indexing/test_timedelta.py
index 48ea49119356d..29031c908bda4 100644
--- a/pandas/tests/indexing/test_timedelta.py
+++ b/pandas/tests/indexing/test_timedelta.py
@@ -1,8 +1,8 @@
+import numpy as np
import pytest
import pandas as pd
from pandas.util import testing as tm
-import numpy as np
class TestTimedeltaIndexing(object):
diff --git a/setup.cfg b/setup.cfg
index de3bd356e8f55..4ada87796d948 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -563,25 +563,6 @@ skip=
pandas/tests/reshape/merge/test_merge_asof.py,
pandas/tests/reshape/merge/test_join.py,
pandas/tests/reshape/merge/test_merge_ordered.py,
- pandas/tests/indexing/test_multiindex.py,
- pandas/tests/indexing/test_indexing.py,
- pandas/tests/indexing/test_scalar.py,
- pandas/tests/indexing/test_timedelta.py,
- pandas/tests/indexing/test_callable.py,
- pandas/tests/indexing/test_datetime.py,
- pandas/tests/indexing/test_ix.py,
- pandas/tests/indexing/test_iloc.py,
- pandas/tests/indexing/test_partial.py,
- pandas/tests/indexing/test_indexing_slow.py,
- pandas/tests/indexing/test_loc.py,
- pandas/tests/indexing/test_floats.py,
- pandas/tests/indexing/test_coercion.py,
- pandas/tests/indexing/common.py,
- pandas/tests/indexing/test_chaining_and_caching.py,
- pandas/tests/indexing/test_panel.py,
- pandas/tests/indexing/test_categorical.py,
- pandas/tests/indexing/interval/test_interval_new.py,
- pandas/tests/indexing/interval/test_interval.py,
pandas/tests/sparse/test_indexing.py,
pandas/tests/arrays/sparse/test_libsparse.py,
pandas/tests/arrays/sparse/test_array.py,
| - [x] xref #23334
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Ran `isort --recursive pandas` and then checked imports using `isort --recursive --check-only pandas`
PR capped at 20 files, but 21 files changed because it includes setup.cfg which has also been changed.
| https://api.github.com/repos/pandas-dev/pandas/pulls/23351 | 2018-10-26T01:53:26Z | 2018-10-28T02:55:11Z | 2018-10-28T02:55:11Z | 2018-10-28T02:55:11Z |
BUG: Handle Period in combine | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 56bf394729773..46c8126f65fec 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -6,7 +6,7 @@
import warnings
from pandas._libs import tslib, lib, tslibs
-from pandas._libs.tslibs import iNaT, OutOfBoundsDatetime
+from pandas._libs.tslibs import iNaT, OutOfBoundsDatetime, Period
from pandas.compat import string_types, text_type, PY3
from .common import (ensure_object, is_bool, is_integer, is_float,
is_complex, is_datetimetz, is_categorical_dtype,
@@ -164,6 +164,12 @@ def trans(x): # noqa
result = to_datetime(result).tz_localize('utc')
result = result.tz_convert(dtype.tz)
+ elif dtype.type == Period:
+ # TODO(DatetimeArray): merge with previous elif
+ from pandas.core.arrays import PeriodArray
+
+ return PeriodArray(result, freq=dtype.freq)
+
except Exception:
pass
diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py
index 2803db4f496a5..3b8d6e6c55ed1 100644
--- a/pandas/tests/frame/test_combine_concat.py
+++ b/pandas/tests/frame/test_combine_concat.py
@@ -759,7 +759,6 @@ def test_combine_first_timedelta(self):
tm.assert_frame_equal(res, exp)
assert res['TD'].dtype == 'timedelta64[ns]'
- @pytest.mark.xfail(reason="GH-23079", strict=True)
def test_combine_first_period(self):
data1 = pd.PeriodIndex(['2011-01', 'NaT', '2011-03',
'2011-04'], freq='M')
| Followup to #23079 to unxfail a period test. | https://api.github.com/repos/pandas-dev/pandas/pulls/23350 | 2018-10-26T01:33:38Z | 2018-10-26T12:24:03Z | 2018-10-26T12:24:03Z | 2018-10-26T12:24:03Z |
API/TST: make hasnans always return python booleans | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 768868d585721..9a781eda0e397 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -910,6 +910,7 @@ Other API Changes
has an improved ``KeyError`` message, and will not fail on duplicate column names with ``drop=True``. (:issue:`22484`)
- Slicing a single row of a DataFrame with multiple ExtensionArrays of the same type now preserves the dtype, rather than coercing to object (:issue:`22784`)
- :class:`DateOffset` attribute `_cacheable` and method `_should_cache` have been removed (:issue:`23118`)
+- :meth:`Index.hasnans` and :meth:`Series.hasnans` now always return a python boolean. Previously, a python or a numpy boolean could be returned, depending on circumstances (:issue:`23294`).
.. _whatsnew_0240.deprecations:
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 943c8a94e1e6a..28fe6471efb73 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -219,7 +219,7 @@ def _isnan(self):
@property # NB: override with cache_readonly in immutable subclasses
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
- return self._isnan.any()
+ return bool(self._isnan.any())
def _maybe_mask_results(self, result, fill_value=None, convert=None):
"""
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 91ae8375c233a..88a36b0ecc7c7 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -889,7 +889,7 @@ def __iter__(self):
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
- return isna(self).any()
+ return bool(isna(self).any())
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index e9b0b087179c9..a9edad1fa2e01 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2221,7 +2221,7 @@ def _nan_idxs(self):
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
if self._can_hold_na:
- return self._isnan.any()
+ return bool(self._isnan.any())
else:
return False
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index dc936af04e045..e8eaca2b61dd7 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -417,7 +417,7 @@ def test_get_unique_index(self, indices):
# and doesn't contain nans.
assert idx_unique.is_unique is True
try:
- assert not idx_unique.hasnans
+ assert idx_unique.hasnans is False
except NotImplementedError:
pass
@@ -916,7 +916,7 @@ def test_hasnans_isnans(self):
# cases in indices doesn't include NaN
expected = np.array([False] * len(idx), dtype=bool)
tm.assert_numpy_array_equal(idx._isnan, expected)
- assert not idx.hasnans
+ assert idx.hasnans is False
idx = index.copy()
values = np.asarray(idx.values)
@@ -938,7 +938,7 @@ def test_hasnans_isnans(self):
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
- assert idx.hasnans
+ assert idx.hasnans is True
def test_fillna(self):
# GH 11343
@@ -978,7 +978,7 @@ def test_fillna(self):
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
- assert idx.hasnans
+ assert idx.hasnans is True
def test_nulls(self):
# this is really a smoke test for the methods
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index 9ce77326d37b7..ad44ceab36bc3 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -356,7 +356,7 @@ def test_nat(self, tz_naive_fixture):
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
- assert not idx.hasnans
+ assert idx.hasnans is False
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
@@ -364,7 +364,7 @@ def test_nat(self, tz_naive_fixture):
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
- assert idx.hasnans
+ assert idx.hasnans is True
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index f33106e61662f..f6ed658251dc7 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -93,7 +93,7 @@ def test_length(self, closed, breaks):
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
- assert not index.hasnans
+ assert index.hasnans is False
result = index.isna()
expected = np.repeat(False, len(index))
@@ -104,7 +104,7 @@ def test_with_nans(self, closed):
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
- assert index.hasnans
+ assert index.hasnans is True
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
diff --git a/pandas/tests/indexes/multi/test_missing.py b/pandas/tests/indexes/multi/test_missing.py
index bedacf84f4f9a..82c486caf2631 100644
--- a/pandas/tests/indexes/multi/test_missing.py
+++ b/pandas/tests/indexes/multi/test_missing.py
@@ -49,7 +49,7 @@ def test_fillna(idx):
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
- assert idx.hasnans
+ assert idx.hasnans is True
def test_dropna():
@@ -91,7 +91,7 @@ def test_hasnans_isnans(idx):
# cases in indices doesn't include NaN
expected = np.array([False] * len(index), dtype=bool)
tm.assert_numpy_array_equal(index._isnan, expected)
- assert not index.hasnans
+ assert index.hasnans is False
index = idx.copy()
values = index.values
@@ -102,7 +102,7 @@ def test_hasnans_isnans(idx):
expected = np.array([False] * len(index), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(index._isnan, expected)
- assert index.hasnans
+ assert index.hasnans is True
def test_nan_stays_float():
diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py
index 33858a28ec81b..276c919dd1d12 100644
--- a/pandas/tests/indexes/period/test_ops.py
+++ b/pandas/tests/indexes/period/test_ops.py
@@ -356,7 +356,7 @@ def test_nat(self):
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
- assert not idx.hasnans
+ assert idx.hasnans is False
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
@@ -364,7 +364,7 @@ def test_nat(self):
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
- assert idx.hasnans
+ assert idx.hasnans is True
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py
index 9f8a3e893c3de..4b8c37cceb444 100644
--- a/pandas/tests/indexes/timedeltas/test_ops.py
+++ b/pandas/tests/indexes/timedeltas/test_ops.py
@@ -273,7 +273,7 @@ def test_nat(self):
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
- assert not idx.hasnans
+ assert idx.hasnans is False
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
@@ -281,7 +281,7 @@ def test_nat(self):
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
- assert idx.hasnans
+ assert idx.hasnans is True
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
diff --git a/pandas/tests/series/test_internals.py b/pandas/tests/series/test_internals.py
index 506e7e14ffc4f..882755f6a71c0 100644
--- a/pandas/tests/series/test_internals.py
+++ b/pandas/tests/series/test_internals.py
@@ -315,11 +315,11 @@ def test_convert_preserve_all_bool(self):
def test_hasnans_unchached_for_series():
# GH#19700
idx = pd.Index([0, 1])
- assert not idx.hasnans
+ assert idx.hasnans is False
assert 'hasnans' in idx._cache
ser = idx.to_series()
- assert not ser.hasnans
+ assert ser.hasnans is False
assert not hasattr(ser, '_cache')
ser.iloc[-1] = np.nan
- assert ser.hasnans
+ assert ser.hasnans is True
assert pd.Series.hasnans.__doc__ == pd.Index.hasnans.__doc__
| - [x] xref #23294
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
``Index.hasnans`` currently returns either a python or a numpy boolean, depending on circumstances. This PR ensures that only python booleans ae returned and makes the tests for hasnans stricter.
| https://api.github.com/repos/pandas-dev/pandas/pulls/23349 | 2018-10-26T00:33:04Z | 2018-10-27T15:47:26Z | 2018-10-27T15:47:26Z | 2018-10-27T15:54:05Z |
Run Isort on tests/util single PR | diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py
index 403fed418cae2..49f8fa30ecb6a 100644
--- a/pandas/tests/util/test_hashing.py
+++ b/pandas/tests/util/test_hashing.py
@@ -1,13 +1,13 @@
-import pytest
import datetime
import numpy as np
-import pandas as pd
+import pytest
-from pandas import DataFrame, Series, Index, MultiIndex
-from pandas.util import hash_array, hash_pandas_object
-from pandas.core.util.hashing import hash_tuples, hash_tuple, _hash_scalar
+import pandas as pd
import pandas.util.testing as tm
+from pandas import DataFrame, Index, MultiIndex, Series
+from pandas.core.util.hashing import _hash_scalar, hash_tuple, hash_tuples
+from pandas.util import hash_array, hash_pandas_object
class TestHashing(object):
diff --git a/pandas/tests/util/test_testing.py b/pandas/tests/util/test_testing.py
index d968005a25006..db2fc5ec868c6 100644
--- a/pandas/tests/util/test_testing.py
+++ b/pandas/tests/util/test_testing.py
@@ -1,18 +1,19 @@
# -*- coding: utf-8 -*-
-import textwrap
import os
-import pandas as pd
-import pytest
-import numpy as np
import sys
-from pandas import Series, DataFrame
-import pandas.util.testing as tm
+import textwrap
+
+import numpy as np
+import pytest
+
+import pandas as pd
import pandas.util._test_decorators as td
-from pandas.util.testing import (assert_almost_equal, raise_with_traceback,
- assert_index_equal, assert_series_equal,
- assert_frame_equal, assert_numpy_array_equal,
- RNGContext)
-from pandas import compat
+import pandas.util.testing as tm
+from pandas import DataFrame, Series, compat
+from pandas.util.testing import (
+ RNGContext, assert_almost_equal, assert_frame_equal, assert_index_equal,
+ assert_numpy_array_equal, assert_series_equal, raise_with_traceback
+)
class TestAssertAlmostEqual(object):
diff --git a/pandas/tests/util/test_util.py b/pandas/tests/util/test_util.py
index 6552655110557..61b3cc526d6d9 100644
--- a/pandas/tests/util/test_util.py
+++ b/pandas/tests/util/test_util.py
@@ -1,22 +1,23 @@
# -*- coding: utf-8 -*-
-import os
-import locale
import codecs
+import locale
+import os
import sys
-from uuid import uuid4
from collections import OrderedDict
+from uuid import uuid4
import pytest
-from pandas.compat import intern, PY3
-import pandas.core.common as com
-from pandas.util._move import move_into_mutable_buffer, BadMove, stolenbuf
-from pandas.util._decorators import deprecate_kwarg, make_signature
-from pandas.util._validators import (validate_args, validate_kwargs,
- validate_args_and_kwargs,
- validate_bool_kwarg)
-import pandas.util.testing as tm
+import pandas.core.common as com
import pandas.util._test_decorators as td
+import pandas.util.testing as tm
+from pandas.compat import PY3, intern
+from pandas.util._decorators import deprecate_kwarg, make_signature
+from pandas.util._move import BadMove, move_into_mutable_buffer, stolenbuf
+from pandas.util._validators import (
+ validate_args, validate_args_and_kwargs, validate_bool_kwarg,
+ validate_kwargs
+)
class TestDecorators(object):
| - [x] partial #23334
- [x] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Run `isort -rc pandas/tests/util/`
| https://api.github.com/repos/pandas-dev/pandas/pulls/23347 | 2018-10-25T23:30:59Z | 2018-10-28T03:16:40Z | 2018-10-28T03:16:40Z | 2019-01-02T20:26:20Z |
Run Isort on tests/arrays single PR | diff --git a/pandas/tests/arrays/categorical/test_algos.py b/pandas/tests/arrays/categorical/test_algos.py
index d4a70e9a1ec2e..e7dc67c5d6a5b 100644
--- a/pandas/tests/arrays/categorical/test_algos.py
+++ b/pandas/tests/arrays/categorical/test_algos.py
@@ -1,5 +1,5 @@
-import pytest
import numpy as np
+import pytest
import pandas as pd
import pandas.util.testing as tm
diff --git a/pandas/tests/arrays/categorical/test_analytics.py b/pandas/tests/arrays/categorical/test_analytics.py
index b1b2e609f9b07..18cce6573817c 100644
--- a/pandas/tests/arrays/categorical/test_analytics.py
+++ b/pandas/tests/arrays/categorical/test_analytics.py
@@ -1,13 +1,12 @@
# -*- coding: utf-8 -*-
-import pytest
import sys
import numpy as np
+import pytest
import pandas.util.testing as tm
from pandas import Categorical, Index, Series
-
from pandas.compat import PYPY
diff --git a/pandas/tests/arrays/categorical/test_api.py b/pandas/tests/arrays/categorical/test_api.py
index 037f01733b51c..1deef4762be26 100644
--- a/pandas/tests/arrays/categorical/test_api.py
+++ b/pandas/tests/arrays/categorical/test_api.py
@@ -1,12 +1,10 @@
# -*- coding: utf-8 -*-
-import pytest
-
import numpy as np
+import pytest
import pandas.util.testing as tm
-from pandas import Categorical, CategoricalIndex, Index, Series, DataFrame
-
+from pandas import Categorical, CategoricalIndex, DataFrame, Index, Series
from pandas.core.arrays.categorical import _recode_for_categories
from pandas.tests.arrays.categorical.common import TestCategorical
diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index 998c1182c013a..b53fd0aa80344 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -1,18 +1,19 @@
# -*- coding: utf-8 -*-
-import pytest
from datetime import datetime
import numpy as np
+import pytest
import pandas as pd
import pandas.util.testing as tm
-from pandas import (Categorical, Index, Series, Timestamp,
- CategoricalIndex, date_range, DatetimeIndex,
- period_range, timedelta_range, NaT,
- Interval, IntervalIndex)
-from pandas.core.dtypes.dtypes import CategoricalDtype
+from pandas import (
+ Categorical, CategoricalIndex, DatetimeIndex, Index, Interval,
+ IntervalIndex, NaT, Series, Timestamp, date_range, period_range,
+ timedelta_range
+)
from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype
+from pandas.core.dtypes.dtypes import CategoricalDtype
class TestCategoricalConstructors(object):
diff --git a/pandas/tests/arrays/categorical/test_dtypes.py b/pandas/tests/arrays/categorical/test_dtypes.py
index 00e99db628c2a..7d4824fe7d18a 100644
--- a/pandas/tests/arrays/categorical/test_dtypes.py
+++ b/pandas/tests/arrays/categorical/test_dtypes.py
@@ -1,12 +1,11 @@
# -*- coding: utf-8 -*-
-import pytest
-
import numpy as np
+import pytest
import pandas.util.testing as tm
-from pandas.core.dtypes.dtypes import CategoricalDtype
+from pandas import Categorical, CategoricalIndex, Index, Series, Timestamp
from pandas.compat import long
-from pandas import Categorical, Index, CategoricalIndex, Series, Timestamp
+from pandas.core.dtypes.dtypes import CategoricalDtype
class TestCategoricalDtypes(object):
diff --git a/pandas/tests/arrays/categorical/test_indexing.py b/pandas/tests/arrays/categorical/test_indexing.py
index d23da1565a952..d74a7d5d2ed5d 100644
--- a/pandas/tests/arrays/categorical/test_indexing.py
+++ b/pandas/tests/arrays/categorical/test_indexing.py
@@ -1,12 +1,11 @@
# -*- coding: utf-8 -*-
-import pytest
-
import numpy as np
+import pytest
-import pandas.util.testing as tm
-from pandas import Categorical, Index, CategoricalIndex, PeriodIndex, Series
import pandas.core.common as com
+import pandas.util.testing as tm
+from pandas import Categorical, CategoricalIndex, Index, PeriodIndex, Series
from pandas.tests.arrays.categorical.common import TestCategorical
diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py
index 6d7a98f146679..a4b39846cbfaf 100644
--- a/pandas/tests/arrays/categorical/test_operators.py
+++ b/pandas/tests/arrays/categorical/test_operators.py
@@ -1,12 +1,11 @@
# -*- coding: utf-8 -*-
+import numpy as np
import pytest
import pandas as pd
-import numpy as np
-
import pandas.util.testing as tm
-from pandas import Categorical, Series, DataFrame, date_range
+from pandas import Categorical, DataFrame, Series, date_range
from pandas.tests.arrays.categorical.common import TestCategorical
diff --git a/pandas/tests/arrays/categorical/test_repr.py b/pandas/tests/arrays/categorical/test_repr.py
index 520d6637c0310..3c830ee6f6da5 100644
--- a/pandas/tests/arrays/categorical/test_repr.py
+++ b/pandas/tests/arrays/categorical/test_repr.py
@@ -2,9 +2,11 @@
import numpy as np
-from pandas import (Categorical, Series, CategoricalIndex, date_range,
- period_range, timedelta_range)
-from pandas.compat import u, PY3
+from pandas import (
+ Categorical, CategoricalIndex, Series, date_range, period_range,
+ timedelta_range
+)
+from pandas.compat import PY3, u
from pandas.core.config import option_context
from pandas.tests.arrays.categorical.common import TestCategorical
diff --git a/pandas/tests/arrays/sparse/test_arithmetics.py b/pandas/tests/arrays/sparse/test_arithmetics.py
index 388411f909bac..2aa24fff3d1d7 100644
--- a/pandas/tests/arrays/sparse/test_arithmetics.py
+++ b/pandas/tests/arrays/sparse/test_arithmetics.py
@@ -2,6 +2,7 @@
import numpy as np
import pytest
+
import pandas as pd
import pandas.util.testing as tm
from pandas.core.sparse.api import SparseDtype
diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py
index cc9512c0759fc..45c23735a986b 100644
--- a/pandas/tests/arrays/sparse/test_array.py
+++ b/pandas/tests/arrays/sparse/test_array.py
@@ -1,19 +1,18 @@
-from pandas.compat import range
-
-import re
import operator
-import pytest
+import re
import warnings
-from numpy import nan
import numpy as np
-import pandas as pd
+import pytest
+from numpy import nan
-from pandas.core.sparse.api import SparseArray, SparseSeries, SparseDtype
+import pandas as pd
+import pandas.util._test_decorators as td
+import pandas.util.testing as tm
from pandas._libs.sparse import IntIndex
+from pandas.compat import range
+from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
from pandas.util.testing import assert_almost_equal
-import pandas.util.testing as tm
-import pandas.util._test_decorators as td
@pytest.fixture(params=["integer", "block"])
diff --git a/pandas/tests/arrays/sparse/test_dtype.py b/pandas/tests/arrays/sparse/test_dtype.py
index 0dcfc3ae79b0f..d3e2116882739 100644
--- a/pandas/tests/arrays/sparse/test_dtype.py
+++ b/pandas/tests/arrays/sparse/test_dtype.py
@@ -1,5 +1,5 @@
-import pytest
import numpy as np
+import pytest
import pandas as pd
import pandas.util.testing as tm
diff --git a/pandas/tests/arrays/sparse/test_libsparse.py b/pandas/tests/arrays/sparse/test_libsparse.py
index 3d867cdda1d42..e6ebd92c6ffdc 100644
--- a/pandas/tests/arrays/sparse/test_libsparse.py
+++ b/pandas/tests/arrays/sparse/test_libsparse.py
@@ -1,13 +1,13 @@
-from pandas import Series
+import operator
-import pytest
import numpy as np
-import operator
-import pandas.util.testing as tm
-import pandas.util._test_decorators as td
+import pytest
-from pandas.core.arrays.sparse import IntIndex, BlockIndex, _make_index
import pandas._libs.sparse as splib
+import pandas.util._test_decorators as td
+import pandas.util.testing as tm
+from pandas import Series
+from pandas.core.arrays.sparse import BlockIndex, IntIndex, _make_index
TEST_LENGTH = 20
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index 8baf53e65ba22..3d5c810402fba 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -4,10 +4,9 @@
import pandas as pd
import pandas.util.testing as tm
-
-from pandas.core.arrays import (DatetimeArrayMixin,
- TimedeltaArrayMixin,
- PeriodArray)
+from pandas.core.arrays import (
+ DatetimeArrayMixin, PeriodArray, TimedeltaArrayMixin
+)
# TODO: more freq variants
diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py
index 10961173d4b6b..41ec2d3026499 100644
--- a/pandas/tests/arrays/test_integer.py
+++ b/pandas/tests/arrays/test_integer.py
@@ -1,18 +1,16 @@
# -*- coding: utf-8 -*-
import numpy as np
-import pandas as pd
-import pandas.util.testing as tm
import pytest
-from pandas.api.types import is_integer, is_float, is_float_dtype, is_scalar
-from pandas.core.dtypes.generic import ABCIndexClass
-
-from pandas.core.arrays import (
- integer_array, IntegerArray)
+import pandas as pd
+import pandas.util.testing as tm
+from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar
+from pandas.core.arrays import IntegerArray, integer_array
from pandas.core.arrays.integer import (
- Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype,
- UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype)
-
+ Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, UInt8Dtype, UInt16Dtype,
+ UInt32Dtype, UInt64Dtype
+)
+from pandas.core.dtypes.generic import ABCIndexClass
from pandas.tests.extension.base import BaseOpsUtil
diff --git a/setup.cfg b/setup.cfg
index 1e34d30c154bd..162c6983f9e29 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -250,16 +250,8 @@ skip=
pandas/tests/arrays/test_datetimelike.py,
pandas/tests/arrays/test_integer.py,
pandas/tests/arrays/test_interval.py,
- pandas/tests/arrays/categorical/test_indexing.py,
- pandas/tests/arrays/categorical/test_sorting.py,
- pandas/tests/arrays/categorical/test_operators.py,
- pandas/tests/arrays/categorical/test_algos.py,
- pandas/tests/arrays/categorical/test_dtypes.py,
- pandas/tests/arrays/categorical/test_repr.py,
- pandas/tests/arrays/categorical/test_analytics.py,
pandas/tests/arrays/categorical/test_missing.py,
- pandas/tests/arrays/categorical/test_api.py,
- pandas/tests/arrays/categorical/test_constructors.py,
+ pandas/tests/arrays/categorical/test_sorting.py,
pandas/tests/util/test_testing.py,
pandas/tests/util/test_util.py,
pandas/tests/util/test_hashing.py,
@@ -461,16 +453,12 @@ skip=
pandas/tests/reshape/merge/test_join.py,
pandas/tests/reshape/merge/test_merge_ordered.py,
pandas/tests/sparse/test_indexing.py,
- pandas/tests/arrays/sparse/test_libsparse.py,
- pandas/tests/arrays/sparse/test_array.py,
- pandas/tests/arrays/sparse/test_dtype.py,
pandas/tests/extension/test_sparse.py,
pandas/tests/extension/base/reduce.py,
pandas/tests/sparse/test_reshape.py,
pandas/tests/sparse/test_pivot.py,
pandas/tests/sparse/test_format.py,
pandas/tests/sparse/test_groupby.py,
- pandas/tests/arrays/sparse/test_arithmetics.py,
pandas/tests/sparse/test_combine_concat.py,
pandas/tests/sparse/series/test_indexing.py,
pandas/tests/sparse/series/test_series.py,
| - [x] partial #23334
- [x] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Run `isort -rc pandas/tests/arrays/` | https://api.github.com/repos/pandas-dev/pandas/pulls/23346 | 2018-10-25T22:17:07Z | 2018-10-28T03:09:21Z | 2018-10-28T03:09:21Z | 2019-01-02T20:26:19Z |
BUG: Fix date_range overflow | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 70725a347c9d0..b8d63e03e3318 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -1027,6 +1027,7 @@ Datetimelike
- Bug in :class:`PeriodIndex` where adding or subtracting a :class:`timedelta` or :class:`Tick` object produced incorrect results (:issue:`22988`)
- Bug in :func:`date_range` when decrementing a start date to a past end date by a negative frequency (:issue:`23270`)
- Bug in :func:`DataFrame.combine` with datetimelike values raising a TypeError (:issue:`23079`)
+- Bug in :func:`date_range` with frequency of ``Day`` or higher where dates sufficiently far in the future could wrap around to the past instead of raising ``OutOfBoundsDatetime`` (:issue:`14187`)
Timedelta
^^^^^^^^^
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index cd20bcbed2211..2392bbdd87f7a 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -1355,11 +1355,11 @@ def _generate_regular_range(cls, start, end, periods, freq):
tz = start.tz
elif start is not None:
b = Timestamp(start).value
- e = b + np.int64(periods) * stride
+ e = _generate_range_overflow_safe(b, periods, stride, side='start')
tz = start.tz
elif end is not None:
e = Timestamp(end).value + stride
- b = e - np.int64(periods) * stride
+ b = _generate_range_overflow_safe(e, periods, stride, side='end')
tz = end.tz
else:
raise ValueError("at least 'start' or 'end' should be specified "
@@ -1384,6 +1384,44 @@ def _generate_regular_range(cls, start, end, periods, freq):
return data
+def _generate_range_overflow_safe(endpoint, periods, stride, side='start'):
+ """
+ Calculate the second endpoint for passing to np.arange, checking
+ to avoid an integer overflow. Catch OverflowError and re-raise
+ as OutOfBoundsDatetime.
+
+ Parameters
+ ----------
+ endpoint : int
+ periods : int
+ stride : int
+ side : {'start', 'end'}
+
+ Returns
+ -------
+ other_end : int
+
+ Raises
+ ------
+ OutOfBoundsDatetime
+ """
+ # GH#14187 raise instead of incorrectly wrapping around
+ assert side in ['start', 'end']
+ if side == 'end':
+ stride *= -1
+
+ try:
+ other_end = checked_add_with_arr(np.int64(endpoint),
+ np.int64(periods) * stride)
+ except OverflowError:
+ raise tslib.OutOfBoundsDatetime('Cannot generate range with '
+ '{side}={endpoint} and '
+ 'periods={periods}'
+ .format(side=side, endpoint=endpoint,
+ periods=periods))
+ return other_end
+
+
def _infer_tz_from_endpoints(start, end, tz):
"""
If a timezone is not explicitly given via `tz`, see if one can
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index 657d8ffe0cd38..450d7643bfbd5 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -15,6 +15,7 @@
from pandas import (
DatetimeIndex, Timestamp, bdate_range, compat, date_range, offsets
)
+from pandas.errors import OutOfBoundsDatetime
from pandas.tests.series.common import TestData
from pandas.tseries.offsets import (
BDay, CDay, DateOffset, MonthEnd, generate_range, prefix_mapping
@@ -79,6 +80,12 @@ def test_date_range_timestamp_equiv_preserve_frequency(self):
class TestDateRanges(TestData):
+ def test_date_range_out_of_bounds(self):
+ # GH#14187
+ with pytest.raises(OutOfBoundsDatetime):
+ date_range('2016-01-01', periods=100000, freq='D')
+ with pytest.raises(OutOfBoundsDatetime):
+ date_range(end='1763-10-12', periods=100000, freq='D')
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
| - [x] closes #14187
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/23345 | 2018-10-25T22:09:13Z | 2018-10-28T03:14:14Z | 2018-10-28T03:14:14Z | 2018-10-28T05:12:34Z |
Run Isort on tests/series single pr | diff --git a/pandas/tests/series/common.py b/pandas/tests/series/common.py
index 0c25dcb29c3b2..ec7558e41ab40 100644
--- a/pandas/tests/series/common.py
+++ b/pandas/tests/series/common.py
@@ -1,6 +1,6 @@
-from pandas.util._decorators import cache_readonly
-import pandas.util.testing as tm
import pandas as pd
+import pandas.util.testing as tm
+from pandas.util._decorators import cache_readonly
_ts = tm.makeTimeSeries()
diff --git a/pandas/tests/series/conftest.py b/pandas/tests/series/conftest.py
index 80a4e81c443ed..352e4df54fe5b 100644
--- a/pandas/tests/series/conftest.py
+++ b/pandas/tests/series/conftest.py
@@ -1,7 +1,6 @@
import pytest
import pandas.util.testing as tm
-
from pandas import Series
diff --git a/pandas/tests/series/indexing/test_alter_index.py b/pandas/tests/series/indexing/test_alter_index.py
index 561d6a9b42508..ea6788a1a36ee 100644
--- a/pandas/tests/series/indexing/test_alter_index.py
+++ b/pandas/tests/series/indexing/test_alter_index.py
@@ -1,22 +1,17 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-import pytest
-
from datetime import datetime
-import pandas as pd
import numpy as np
-
+import pytest
from numpy import nan
-from pandas import compat
-
-from pandas import (Series, date_range, isna, Categorical)
-from pandas.compat import lrange, range
-
-from pandas.util.testing import (assert_series_equal)
+import pandas as pd
import pandas.util.testing as tm
+from pandas import Categorical, Series, compat, date_range, isna
+from pandas.compat import lrange, range
+from pandas.util.testing import assert_series_equal
@pytest.mark.parametrize(
diff --git a/pandas/tests/series/indexing/test_boolean.py b/pandas/tests/series/indexing/test_boolean.py
index e2a9b3586648d..df92851f43227 100644
--- a/pandas/tests/series/indexing/test_boolean.py
+++ b/pandas/tests/series/indexing/test_boolean.py
@@ -1,20 +1,17 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
+import numpy as np
import pytest
import pandas as pd
-import numpy as np
-
-from pandas import (Series, date_range, isna, Index, Timestamp)
+import pandas.util.testing as tm
+from pandas import Index, Series, Timestamp, date_range, isna
from pandas.compat import lrange, range
from pandas.core.dtypes.common import is_integer
-
from pandas.core.indexing import IndexingError
from pandas.tseries.offsets import BDay
-
-from pandas.util.testing import (assert_series_equal)
-import pandas.util.testing as tm
+from pandas.util.testing import assert_series_equal
def test_getitem_boolean(test_data):
diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py
index d1f022ef982c0..d63150165d7d3 100644
--- a/pandas/tests/series/indexing/test_datetime.py
+++ b/pandas/tests/series/indexing/test_datetime.py
@@ -1,24 +1,20 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-import pytest
-
from datetime import datetime, timedelta
import numpy as np
-import pandas as pd
-
-from pandas import (Series, DataFrame,
- date_range, Timestamp, DatetimeIndex, NaT)
-
-from pandas.compat import lrange, range
-from pandas.util.testing import (assert_series_equal,
- assert_frame_equal, assert_almost_equal)
-
-import pandas.util.testing as tm
+import pytest
+import pandas as pd
import pandas._libs.index as _index
+import pandas.util.testing as tm
+from pandas import DataFrame, DatetimeIndex, NaT, Series, Timestamp, date_range
from pandas._libs import tslib
+from pandas.compat import lrange, range
+from pandas.util.testing import (
+ assert_almost_equal, assert_frame_equal, assert_series_equal
+)
"""
diff --git a/pandas/tests/series/indexing/test_iloc.py b/pandas/tests/series/indexing/test_iloc.py
index 648a37ce0262b..64ef2078cbe78 100644
--- a/pandas/tests/series/indexing/test_iloc.py
+++ b/pandas/tests/series/indexing/test_iloc.py
@@ -4,10 +4,8 @@
import numpy as np
from pandas import Series
-
from pandas.compat import lrange, range
-from pandas.util.testing import (assert_series_equal,
- assert_almost_equal)
+from pandas.util.testing import assert_almost_equal, assert_series_equal
def test_iloc():
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index 6e491cbb8ba79..e926e477d0bc4 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -3,22 +3,20 @@
""" test get/set & misc """
-import pytest
-
from datetime import timedelta
import numpy as np
-import pandas as pd
+import pytest
+import pandas as pd
+import pandas.util.testing as tm
+from pandas import (
+ Categorical, DataFrame, MultiIndex, Series, Timedelta, Timestamp
+)
+from pandas.compat import lrange, range
from pandas.core.dtypes.common import is_scalar
-from pandas import (Series, DataFrame, MultiIndex,
- Timestamp, Timedelta, Categorical)
from pandas.tseries.offsets import BDay
-
-from pandas.compat import lrange, range
-
-from pandas.util.testing import (assert_series_equal)
-import pandas.util.testing as tm
+from pandas.util.testing import assert_series_equal
def test_basic_indexing():
diff --git a/pandas/tests/series/indexing/test_loc.py b/pandas/tests/series/indexing/test_loc.py
index 088406e0a1db6..596b5d022c68d 100644
--- a/pandas/tests/series/indexing/test_loc.py
+++ b/pandas/tests/series/indexing/test_loc.py
@@ -1,15 +1,13 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
+import numpy as np
import pytest
-import numpy as np
import pandas as pd
-
-from pandas import (Series, Timestamp)
-
+from pandas import Series, Timestamp
from pandas.compat import lrange
-from pandas.util.testing import (assert_series_equal)
+from pandas.util.testing import assert_series_equal
def test_loc_getitem(test_data):
diff --git a/pandas/tests/series/indexing/test_numeric.py b/pandas/tests/series/indexing/test_numeric.py
index 6df63c3981af3..0b61274801de1 100644
--- a/pandas/tests/series/indexing/test_numeric.py
+++ b/pandas/tests/series/indexing/test_numeric.py
@@ -1,17 +1,14 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
+import numpy as np
import pytest
-import numpy as np
import pandas as pd
-
-from pandas import (Index, Series, DataFrame)
-
-from pandas.compat import lrange, range
-from pandas.util.testing import (assert_series_equal)
-
import pandas.util.testing as tm
+from pandas import DataFrame, Index, Series
+from pandas.compat import lrange, range
+from pandas.util.testing import assert_series_equal
def test_get():
diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py
index 7d4aa2d4df6fc..5193307a3c256 100644
--- a/pandas/tests/series/test_alter_axes.py
+++ b/pandas/tests/series/test_alter_axes.py
@@ -1,16 +1,14 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-import pytest
-
from datetime import datetime
import numpy as np
+import pytest
-from pandas import Series, DataFrame, Index, MultiIndex, RangeIndex
-
-from pandas.compat import lrange, range, zip
import pandas.util.testing as tm
+from pandas import DataFrame, Index, MultiIndex, RangeIndex, Series
+from pandas.compat import lrange, range, zip
class TestSeriesAlterAxes(object):
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index a00a9816c70c0..c84cf2ee50e4b 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -1,28 +1,31 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-from itertools import product
-from distutils.version import LooseVersion
import operator
-import pytest
+from distutils.version import LooseVersion
+from itertools import product
-from numpy import nan
import numpy as np
-import pandas as pd
+import pytest
+from numpy import nan
-from pandas import (Series, Categorical, DataFrame, isna, notna,
- bdate_range, date_range, CategoricalIndex)
+import pandas as pd
+import pandas.core.nanops as nanops
+import pandas.util._test_decorators as td
+import pandas.util.testing as tm
+from pandas import (
+ Categorical, CategoricalIndex, DataFrame, Series, bdate_range, compat,
+ date_range, isna, notna
+)
+from pandas.compat import PY35, lrange, range
from pandas.core.index import MultiIndex
from pandas.core.indexes.datetimes import Timestamp
from pandas.core.indexes.timedeltas import Timedelta
-import pandas.core.nanops as nanops
+from pandas.util.testing import (
+ assert_almost_equal, assert_frame_equal, assert_index_equal,
+ assert_series_equal
+)
-from pandas.compat import lrange, range, PY35
-from pandas import compat
-from pandas.util.testing import (assert_series_equal, assert_almost_equal,
- assert_frame_equal, assert_index_equal)
-import pandas.util.testing as tm
-import pandas.util._test_decorators as td
from .common import TestData
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 3e68d4fc03f1f..32181fe3dad39 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -1,25 +1,23 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-from collections import OrderedDict
-import warnings
import pydoc
+import warnings
+from collections import OrderedDict
+import numpy as np
import pytest
-import numpy as np
import pandas as pd
-
-from pandas import Index, Series, DataFrame, date_range
-from pandas.core.indexes.datetimes import Timestamp
-
-from pandas.compat import range, lzip, isidentifier, string_types
-from pandas import (compat, Categorical, period_range, timedelta_range,
- DatetimeIndex, TimedeltaIndex)
-from pandas.core.arrays import PeriodArray
import pandas.io.formats.printing as printing
-from pandas.util.testing import (assert_series_equal,
- ensure_clean)
import pandas.util.testing as tm
+from pandas import (
+ Categorical, DataFrame, DatetimeIndex, Index, Series, TimedeltaIndex,
+ compat, date_range, period_range, timedelta_range
+)
+from pandas.compat import isidentifier, lzip, range, string_types
+from pandas.core.arrays import PeriodArray
+from pandas.core.indexes.datetimes import Timestamp
+from pandas.util.testing import assert_series_equal, ensure_clean
from .common import TestData
diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py
index 509cd8d0f3241..62a677b82aeb4 100644
--- a/pandas/tests/series/test_apply.py
+++ b/pandas/tests/series/test_apply.py
@@ -1,21 +1,18 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-import pytest
-
-from collections import Counter, defaultdict, OrderedDict
+from collections import Counter, OrderedDict, defaultdict
from itertools import chain
import numpy as np
-import pandas as pd
+import pytest
-from pandas import (Index, Series, DataFrame, isna)
-from pandas.compat import lrange
-from pandas import compat
-from pandas.util.testing import (assert_series_equal,
- assert_frame_equal)
+import pandas as pd
import pandas.util.testing as tm
+from pandas import DataFrame, Index, Series, compat, isna
+from pandas.compat import lrange
from pandas.conftest import _get_cython_table_params
+from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestSeriesApply():
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py
index 43b24930f6303..7ee78645fe96e 100644
--- a/pandas/tests/series/test_arithmetic.py
+++ b/pandas/tests/series/test_arithmetic.py
@@ -4,15 +4,14 @@
import numpy as np
import pytest
-from pandas import Series
-
import pandas as pd
import pandas.util.testing as tm
-
+from pandas import Series
# ------------------------------------------------------------------
# Comparisons
+
class TestSeriesComparison(object):
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
diff --git a/pandas/tests/series/test_asof.py b/pandas/tests/series/test_asof.py
index e85a0ac42ae1a..2d18b61750996 100644
--- a/pandas/tests/series/test_asof.py
+++ b/pandas/tests/series/test_asof.py
@@ -1,12 +1,10 @@
# coding=utf-8
-import pytest
-
import numpy as np
-from pandas import (offsets, Series, notna,
- isna, date_range, Timestamp)
+import pytest
import pandas.util.testing as tm
+from pandas import Series, Timestamp, date_range, isna, notna, offsets
class TestSeriesAsof():
diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
index bf7247caa5d4a..539e3c560b662 100644
--- a/pandas/tests/series/test_combine_concat.py
+++ b/pandas/tests/series/test_combine_concat.py
@@ -1,19 +1,16 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-import pytest
-
from datetime import datetime
-from numpy import nan
import numpy as np
-import pandas as pd
-
-from pandas import Series, DataFrame, date_range, DatetimeIndex
+import pytest
+from numpy import nan
-from pandas import compat
-from pandas.util.testing import assert_series_equal
+import pandas as pd
import pandas.util.testing as tm
+from pandas import DataFrame, DatetimeIndex, Series, compat, date_range
+from pandas.util.testing import assert_series_equal
class TestSeriesCombine():
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 83990bddcee5d..07b8eb930e8d0 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -1,30 +1,29 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-import pytest
-
-from datetime import datetime, timedelta
from collections import OrderedDict
+from datetime import datetime, timedelta
-from numpy import nan
import numpy as np
import numpy.ma as ma
-import pandas as pd
+import pytest
+from numpy import nan
-from pandas.api.types import CategoricalDtype
-from pandas.core.dtypes.common import (
- is_categorical_dtype,
- is_datetime64tz_dtype)
-from pandas import (Index, Series, isna, date_range, Timestamp,
- NaT, period_range, timedelta_range, MultiIndex,
- IntervalIndex, Categorical, DataFrame)
-from pandas.core.arrays import period_array
+import pandas as pd
+import pandas.util.testing as tm
+from pandas import (
+ Categorical, DataFrame, Index, IntervalIndex, MultiIndex, NaT, Series,
+ Timestamp, date_range, isna, period_range, timedelta_range
+)
from pandas._libs import lib
from pandas._libs.tslib import iNaT
-
-from pandas.compat import lrange, range, zip, long, PY36
+from pandas.api.types import CategoricalDtype
+from pandas.compat import PY36, long, lrange, range, zip
+from pandas.core.arrays import period_array
+from pandas.core.dtypes.common import (
+ is_categorical_dtype, is_datetime64tz_dtype
+)
from pandas.util.testing import assert_series_equal
-import pandas.util.testing as tm
class TestSeriesConstructors():
diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py
index 7f8bd375cb1a4..4825ecbe51584 100644
--- a/pandas/tests/series/test_datetime_values.py
+++ b/pandas/tests/series/test_datetime_values.py
@@ -1,28 +1,26 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-import locale
import calendar
+import locale
import unicodedata
+from datetime import date, datetime, time
+
+import numpy as np
import pytest
import pytz
-from datetime import datetime, time, date
-
-import numpy as np
import pandas as pd
-
-from pandas.core.dtypes.common import is_integer_dtype, is_list_like
-from pandas import (Index, Series, DataFrame, bdate_range,
- date_range, period_range, timedelta_range,
- PeriodIndex, DatetimeIndex, TimedeltaIndex,
- compat)
import pandas.core.common as com
-from pandas.core.arrays import PeriodArray
+import pandas.util.testing as tm
+from pandas import (
+ DataFrame, DatetimeIndex, Index, PeriodIndex, Series, TimedeltaIndex,
+ bdate_range, compat, date_range, period_range, timedelta_range
+)
from pandas._libs.tslibs.timezones import maybe_get_tz
-
+from pandas.core.arrays import PeriodArray
+from pandas.core.dtypes.common import is_integer_dtype, is_list_like
from pandas.util.testing import assert_series_equal
-import pandas.util.testing as tm
class TestSeriesDatetimeValues():
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index 55a1afcb504e7..003627b397645 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -1,28 +1,24 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-import pytest
-
-from datetime import datetime, timedelta
-
-import sys
import string
+import sys
import warnings
+from datetime import datetime, timedelta
-from numpy import nan
-import pandas as pd
import numpy as np
+import pytest
+from numpy import nan
+import pandas as pd
+import pandas._libs.tslib as tslib
+import pandas.util.testing as tm
from pandas import (
- Series, Timestamp, Timedelta, DataFrame, date_range,
- Categorical, Index
+ Categorical, DataFrame, Index, Series, Timedelta, Timestamp, compat,
+ date_range
)
from pandas.api.types import CategoricalDtype
-import pandas._libs.tslib as tslib
-
from pandas.compat import lrange, range, u
-from pandas import compat
-import pandas.util.testing as tm
class TestSeriesDtypes():
diff --git a/pandas/tests/series/test_internals.py b/pandas/tests/series/test_internals.py
index 506e7e14ffc4f..aab3ff7025c0b 100644
--- a/pandas/tests/series/test_internals.py
+++ b/pandas/tests/series/test_internals.py
@@ -1,20 +1,18 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-import pytest
-
from datetime import datetime
-from numpy import nan
import numpy as np
+import pytest
+from numpy import nan
+import pandas as pd
+import pandas._libs.lib as lib
+import pandas.util.testing as tm
from pandas import Series
from pandas.core.indexes.datetimes import Timestamp
-import pandas._libs.lib as lib
-import pandas as pd
-
from pandas.util.testing import assert_series_equal
-import pandas.util.testing as tm
class TestSeriesInternals(object):
diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py
index 50f548b855247..033816dd8ee4b 100644
--- a/pandas/tests/series/test_io.py
+++ b/pandas/tests/series/test_io.py
@@ -1,20 +1,20 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-from datetime import datetime
import collections
-import pytest
+from datetime import datetime
import numpy as np
-import pandas as pd
-
-from pandas import Series, DataFrame
+import pytest
+import pandas as pd
+import pandas.util.testing as tm
+from pandas import DataFrame, Series
from pandas.compat import StringIO, u
from pandas.io.common import _get_handle
-from pandas.util.testing import (assert_series_equal, assert_almost_equal,
- assert_frame_equal, ensure_clean)
-import pandas.util.testing as tm
+from pandas.util.testing import (
+ assert_almost_equal, assert_frame_equal, assert_series_equal, ensure_clean
+)
class TestSeriesToCSV():
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index dcc4845f274ba..e89bcae052115 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -1,26 +1,26 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-import pytz
-import pytest
-
-from datetime import timedelta, datetime
-
+from datetime import datetime, timedelta
from distutils.version import LooseVersion
-from numpy import nan
+
import numpy as np
-import pandas as pd
+import pytest
+import pytz
+from numpy import nan
-from pandas import (Series, DataFrame, isna, date_range,
- MultiIndex, Index, Timestamp, NaT, IntervalIndex,
- Categorical)
-from pandas.compat import range
+import pandas as pd
+import pandas.util._test_decorators as td
+import pandas.util.testing as tm
+from pandas import (
+ Categorical, DataFrame, Index, IntervalIndex, MultiIndex, NaT, Series,
+ Timestamp, date_range, isna
+)
from pandas._libs.tslib import iNaT
+from pandas.compat import range
from pandas.core.series import remove_na
-from pandas.util.testing import assert_series_equal, assert_frame_equal
-import pandas.util.testing as tm
-import pandas.util._test_decorators as td
from pandas.errors import PerformanceWarning
+from pandas.util.testing import assert_frame_equal, assert_series_equal
try:
import scipy
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index 32a687be77b95..57688c7a3c3ab 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -1,26 +1,26 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-import pytest
-
-from datetime import datetime, timedelta
import operator
+from datetime import datetime, timedelta
-from numpy import nan
import numpy as np
-import pandas as pd
+import pytest
+from numpy import nan
-from pandas import (Index, Series, DataFrame, isna, bdate_range,
- NaT, date_range, timedelta_range, Categorical)
-from pandas.core.indexes.datetimes import Timestamp
+import pandas as pd
import pandas.core.nanops as nanops
-from pandas.core import ops
-
-from pandas.compat import range
-from pandas import compat
-from pandas.util.testing import (assert_series_equal, assert_almost_equal,
- assert_frame_equal)
import pandas.util.testing as tm
+from pandas import (
+ Categorical, DataFrame, Index, NaT, Series, bdate_range, compat,
+ date_range, isna, timedelta_range
+)
+from pandas.compat import range
+from pandas.core import ops
+from pandas.core.indexes.datetimes import Timestamp
+from pandas.util.testing import (
+ assert_almost_equal, assert_frame_equal, assert_series_equal
+)
from .common import TestData
diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py
index 7a095b6dc6663..d80e2fd276407 100644
--- a/pandas/tests/series/test_period.py
+++ b/pandas/tests/series/test_period.py
@@ -2,10 +2,10 @@
import pytest
import pandas as pd
-import pandas.util.testing as tm
import pandas.core.indexes.period as period
+import pandas.util.testing as tm
+from pandas import DataFrame, Period, Series, period_range
from pandas.core.arrays import PeriodArray
-from pandas import Series, period_range, DataFrame, Period
def _permute(obj):
diff --git a/pandas/tests/series/test_quantile.py b/pandas/tests/series/test_quantile.py
index fc6226c92d8fe..1a4c72b9f35fc 100644
--- a/pandas/tests/series/test_quantile.py
+++ b/pandas/tests/series/test_quantile.py
@@ -1,15 +1,14 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
+import numpy as np
import pytest
-import numpy as np
import pandas as pd
-
+import pandas.util.testing as tm
from pandas import Index, Series
-from pandas.core.indexes.datetimes import Timestamp
from pandas.core.dtypes.common import is_integer
-import pandas.util.testing as tm
+from pandas.core.indexes.datetimes import Timestamp
from .common import TestData
diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py
index e9382700af989..afe1155932ea4 100644
--- a/pandas/tests/series/test_rank.py
+++ b/pandas/tests/series/test_rank.py
@@ -1,23 +1,20 @@
# -*- coding: utf-8 -*-
-from pandas import compat, Timestamp
+from distutils.version import LooseVersion
+from itertools import chain
+import numpy as np
import pytest
-
-from distutils.version import LooseVersion
from numpy import nan
-import numpy as np
-from pandas import Series, date_range, NaT
+import pandas.util._test_decorators as td
+import pandas.util.testing as tm
+from pandas import NaT, Series, Timestamp, compat, date_range
+from pandas._libs.algos import Infinity, NegInfinity
+from pandas._libs.tslib import iNaT
from pandas.api.types import CategoricalDtype
-
from pandas.compat import product
-from pandas.util.testing import assert_series_equal
-import pandas.util.testing as tm
from pandas.tests.series.common import TestData
-from pandas._libs.tslib import iNaT
-from pandas._libs.algos import Infinity, NegInfinity
-from itertools import chain
-import pandas.util._test_decorators as td
+from pandas.util.testing import assert_series_equal
class TestSeriesRank(TestData):
diff --git a/pandas/tests/series/test_replace.py b/pandas/tests/series/test_replace.py
index 9e198d2854f24..54318aa9a4a34 100644
--- a/pandas/tests/series/test_replace.py
+++ b/pandas/tests/series/test_replace.py
@@ -1,9 +1,9 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
+import numpy as np
import pytest
-import numpy as np
import pandas as pd
import pandas._libs.lib as lib
import pandas.util.testing as tm
diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py
index 730c2b7865f1f..8e89dd0f44a4d 100644
--- a/pandas/tests/series/test_repr.py
+++ b/pandas/tests/series/test_repr.py
@@ -1,21 +1,20 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-from datetime import datetime, timedelta
-
import sys
+from datetime import datetime, timedelta
import numpy as np
-import pandas as pd
-
-from pandas import (Index, Series, DataFrame, date_range, option_context,
- Categorical, period_range, timedelta_range)
-from pandas.core.index import MultiIndex
-from pandas.core.base import StringMixin
-from pandas.compat import lrange, range, u
-from pandas import compat
+import pandas as pd
import pandas.util.testing as tm
+from pandas import (
+ Categorical, DataFrame, Index, Series, compat, date_range, option_context,
+ period_range, timedelta_range
+)
+from pandas.compat import lrange, range, u
+from pandas.core.base import StringMixin
+from pandas.core.index import MultiIndex
from .common import TestData
diff --git a/pandas/tests/series/test_sorting.py b/pandas/tests/series/test_sorting.py
index 13e0d1b12c372..ed60872c8e871 100644
--- a/pandas/tests/series/test_sorting.py
+++ b/pandas/tests/series/test_sorting.py
@@ -1,14 +1,13 @@
# coding=utf-8
-import pytest
-
-import numpy as np
import random
-from pandas import DataFrame, Series, MultiIndex, IntervalIndex, Categorical
+import numpy as np
+import pytest
-from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
+from pandas import Categorical, DataFrame, IntervalIndex, MultiIndex, Series
+from pandas.util.testing import assert_almost_equal, assert_series_equal
from .common import TestData
diff --git a/pandas/tests/series/test_subclass.py b/pandas/tests/series/test_subclass.py
index 70e44a9d2d40f..f48b4e6a7510f 100644
--- a/pandas/tests/series/test_subclass.py
+++ b/pandas/tests/series/test_subclass.py
@@ -1,9 +1,10 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import numpy as np
+
import pandas as pd
-from pandas import SparseDtype
import pandas.util.testing as tm
+from pandas import SparseDtype
class TestSeriesSubclassing(object):
diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py
index 72492de4b1247..497b1aef02897 100644
--- a/pandas/tests/series/test_timeseries.py
+++ b/pandas/tests/series/test_timeseries.py
@@ -1,28 +1,28 @@
# coding=utf-8
# pylint: disable-msg=E1101,W0612
-import pytest
+from datetime import datetime, time, timedelta
import numpy as np
-from datetime import datetime, timedelta, time
+import pytest
import pandas as pd
-import pandas.util.testing as tm
import pandas.util._test_decorators as td
+import pandas.util.testing as tm
+from pandas import (
+ DataFrame, Index, NaT, Series, Timestamp, concat, date_range, offsets,
+ timedelta_range, to_datetime
+)
from pandas._libs.tslib import iNaT
-from pandas.compat import lrange, StringIO, product
-from pandas.errors import NullFrequencyError
-
-from pandas.core.indexes.timedeltas import TimedeltaIndex
+from pandas.compat import StringIO, lrange, product
from pandas.core.indexes.datetimes import DatetimeIndex
-from pandas.tseries.offsets import BDay, BMonthEnd
-from pandas import (Index, Series, date_range, NaT, concat, DataFrame,
- Timestamp, to_datetime, offsets,
- timedelta_range)
-from pandas.util.testing import (assert_series_equal, assert_almost_equal,
- assert_frame_equal)
-
+from pandas.core.indexes.timedeltas import TimedeltaIndex
+from pandas.errors import NullFrequencyError
from pandas.tests.series.common import TestData
+from pandas.tseries.offsets import BDay, BMonthEnd
+from pandas.util.testing import (
+ assert_almost_equal, assert_frame_equal, assert_series_equal
+)
def _simple_ts(start, end, freq='D'):
diff --git a/pandas/tests/series/test_timezones.py b/pandas/tests/series/test_timezones.py
index 8c1ea6bff5f4d..0b65af6831d07 100644
--- a/pandas/tests/series/test_timezones.py
+++ b/pandas/tests/series/test_timezones.py
@@ -4,16 +4,16 @@
"""
from datetime import datetime
+import numpy as np
import pytest
import pytz
-import numpy as np
from dateutil.tz import tzoffset
import pandas.util.testing as tm
-from pandas._libs.tslibs import timezones, conversion
+from pandas import DatetimeIndex, Index, NaT, Series, Timestamp
+from pandas._libs.tslibs import conversion, timezones
from pandas.compat import lrange
from pandas.core.indexes.datetimes import date_range
-from pandas import Series, Timestamp, DatetimeIndex, Index, NaT
class TestSeriesTimezones(object):
diff --git a/pandas/tests/series/test_validate.py b/pandas/tests/series/test_validate.py
index 8c4b6ee5b1d75..a6cbb058dbc9d 100644
--- a/pandas/tests/series/test_validate.py
+++ b/pandas/tests/series/test_validate.py
@@ -1,4 +1,5 @@
import pytest
+
import pandas.util.testing as tm
diff --git a/setup.cfg b/setup.cfg
index a5006d66868f6..b2d11aaa93d4a 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -286,41 +286,7 @@ skip=
pandas/tests/indexes/timedeltas/test_partial_slicing.py,
pandas/tests/indexes/timedeltas/test_timedelta_range.py,
pandas/tests/indexes/timedeltas/test_ops.py,
- pandas/tests/series/test_duplicates.py,
- pandas/tests/series/test_internals.py,
- pandas/tests/series/test_quantile.py,
- pandas/tests/series/test_period.py,
- pandas/tests/series/test_io.py,
- pandas/tests/series/test_validate.py,
- pandas/tests/series/test_timezones.py,
- pandas/tests/series/test_datetime_values.py,
- pandas/tests/series/test_sorting.py,
- pandas/tests/series/test_subclass.py,
- pandas/tests/series/test_operators.py,
- pandas/tests/series/test_asof.py,
- pandas/tests/series/test_apply.py,
- pandas/tests/series/test_arithmetic.py,
- pandas/tests/series/test_replace.py,
- pandas/tests/series/test_dtypes.py,
- pandas/tests/series/test_timeseries.py,
- pandas/tests/series/test_repr.py,
- pandas/tests/series/test_analytics.py,
- pandas/tests/series/test_combine_concat.py,
- pandas/tests/series/common.py,
- pandas/tests/series/test_missing.py,
- pandas/tests/series/conftest.py,
- pandas/tests/series/test_api.py,
- pandas/tests/series/test_constructors.py,
- pandas/tests/series/test_alter_axes.py,
- pandas/tests/series/test_rank.py,
- pandas/tests/series/indexing/test_indexing.py,
- pandas/tests/series/indexing/test_alter_index.py,
- pandas/tests/series/indexing/test_numeric.py,
- pandas/tests/series/indexing/test_boolean.py,
pandas/tests/series/indexing/test_callable.py,
- pandas/tests/series/indexing/test_datetime.py,
- pandas/tests/series/indexing/test_iloc.py,
- pandas/tests/series/indexing/test_loc.py,
pandas/tests/arrays/test_datetimelike.py,
pandas/tests/arrays/test_integer.py,
pandas/tests/arrays/test_interval.py,
@@ -337,6 +303,7 @@ skip=
pandas/tests/util/test_testing.py,
pandas/tests/util/test_util.py,
pandas/tests/util/test_hashing.py,
+ pandas/tests/series/test_duplicates.py
pandas/tests/extension/test_common.py,
pandas/tests/extension/test_integer.py,
pandas/tests/extension/test_external_block.py,
| - [x] partial #23334
- [x] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Run `isort -rc pandas/tests/series/` | https://api.github.com/repos/pandas-dev/pandas/pulls/23344 | 2018-10-25T22:04:05Z | 2018-10-28T03:06:31Z | 2018-10-28T03:06:31Z | 2019-01-02T20:26:23Z |
Run Isort on tests/indexes P4/Final | diff --git a/pandas/tests/indexes/period/test_arithmetic.py b/pandas/tests/indexes/period/test_arithmetic.py
index d9cbb3ea27d7b..5864bc0cefcbc 100644
--- a/pandas/tests/indexes/period/test_arithmetic.py
+++ b/pandas/tests/indexes/period/test_arithmetic.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
-import pytest
import numpy as np
+import pytest
import pandas as pd
import pandas.util.testing as tm
diff --git a/pandas/tests/indexes/period/test_asfreq.py b/pandas/tests/indexes/period/test_asfreq.py
index ea59a57069faa..fed6bbce6259f 100644
--- a/pandas/tests/indexes/period/test_asfreq.py
+++ b/pandas/tests/indexes/period/test_asfreq.py
@@ -1,9 +1,9 @@
+import numpy as np
import pytest
-import numpy as np
import pandas as pd
+from pandas import DataFrame, PeriodIndex, Series
from pandas.util import testing as tm
-from pandas import PeriodIndex, Series, DataFrame
class TestPeriodIndex(object):
diff --git a/pandas/tests/indexes/period/test_astype.py b/pandas/tests/indexes/period/test_astype.py
index a5042b8c714c8..f254cf2a9cae8 100644
--- a/pandas/tests/indexes/period/test_astype.py
+++ b/pandas/tests/indexes/period/test_astype.py
@@ -5,7 +5,7 @@
import pandas as pd
import pandas.util.testing as tm
-from pandas import NaT, Period, PeriodIndex, Int64Index, Index, period_range
+from pandas import Index, Int64Index, NaT, Period, PeriodIndex, period_range
class TestPeriodIndexAsType(object):
diff --git a/pandas/tests/indexes/period/test_construction.py b/pandas/tests/indexes/period/test_construction.py
index e1cefaf5905ad..cfc6c3b8b0a2e 100644
--- a/pandas/tests/indexes/period/test_construction.py
+++ b/pandas/tests/indexes/period/test_construction.py
@@ -1,12 +1,13 @@
+import numpy as np
import pytest
-import numpy as np
import pandas as pd
-import pandas.util.testing as tm
import pandas.core.indexes.period as period
-from pandas.compat import lrange, PY3, text_type, lmap
-from pandas import (Period, PeriodIndex, period_range, offsets, date_range,
- Series, Index)
+import pandas.util.testing as tm
+from pandas import (
+ Index, Period, PeriodIndex, Series, date_range, offsets, period_range
+)
+from pandas.compat import PY3, lmap, lrange, text_type
from pandas.core.dtypes.dtypes import PeriodDtype
diff --git a/pandas/tests/indexes/period/test_formats.py b/pandas/tests/indexes/period/test_formats.py
index 2a893ae16e30d..87211d091e4ea 100644
--- a/pandas/tests/indexes/period/test_formats.py
+++ b/pandas/tests/indexes/period/test_formats.py
@@ -1,10 +1,9 @@
-from pandas import PeriodIndex
-
import numpy as np
import pytest
-import pandas.util.testing as tm
import pandas as pd
+import pandas.util.testing as tm
+from pandas import PeriodIndex
def test_to_native_types():
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index 609c4a828adec..b720f56eeab93 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -1,15 +1,16 @@
from datetime import datetime, timedelta
+import numpy as np
import pytest
-import numpy as np
import pandas as pd
-from pandas.util import testing as tm
-from pandas.compat import lrange
+from pandas import (
+ DatetimeIndex, Period, PeriodIndex, Series, notna, period_range
+)
from pandas._libs import tslibs
-from pandas import (PeriodIndex, Series, DatetimeIndex,
- period_range, Period, notna)
from pandas._libs.tslibs import period as libperiod
+from pandas.compat import lrange
+from pandas.util import testing as tm
class TestGetItem(object):
diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py
index 33858a28ec81b..4d3f9b7e95f8a 100644
--- a/pandas/tests/indexes/period/test_ops.py
+++ b/pandas/tests/indexes/period/test_ops.py
@@ -5,8 +5,7 @@
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
-from pandas import DatetimeIndex, PeriodIndex, Series, Period, Index
-
+from pandas import DatetimeIndex, Index, Period, PeriodIndex, Series
from pandas.core.arrays import PeriodArray
from pandas.tests.test_base import Ops
diff --git a/pandas/tests/indexes/period/test_partial_slicing.py b/pandas/tests/indexes/period/test_partial_slicing.py
index 82527464ea6e7..eec761395d971 100644
--- a/pandas/tests/indexes/period/test_partial_slicing.py
+++ b/pandas/tests/indexes/period/test_partial_slicing.py
@@ -1,11 +1,11 @@
-import pytest
-
import numpy as np
+import pytest
import pandas as pd
+from pandas import (
+ DataFrame, DatetimeIndex, Period, PeriodIndex, Series, period_range
+)
from pandas.util import testing as tm
-from pandas import (Series, period_range, DatetimeIndex, PeriodIndex,
- DataFrame, Period)
class TestPeriodIndex(object):
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index 405edba83dc7a..e699a560cb2f6 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -1,13 +1,13 @@
-import pytest
-
import numpy as np
+import pytest
import pandas as pd
import pandas.util._test_decorators as td
+from pandas import (
+ DataFrame, DatetimeIndex, Index, NaT, Period, PeriodIndex, Series,
+ date_range, offsets, period_range
+)
from pandas.util import testing as tm
-from pandas import (PeriodIndex, period_range, DatetimeIndex, NaT,
- Index, Period, Series, DataFrame, date_range,
- offsets)
from ..datetimelike import DatetimeLike
diff --git a/pandas/tests/indexes/period/test_period_range.py b/pandas/tests/indexes/period/test_period_range.py
index 640f24f67f72f..fd8d2f94c1799 100644
--- a/pandas/tests/indexes/period/test_period_range.py
+++ b/pandas/tests/indexes/period/test_period_range.py
@@ -1,6 +1,7 @@
import pytest
+
import pandas.util.testing as tm
-from pandas import date_range, NaT, period_range, Period, PeriodIndex
+from pandas import NaT, Period, PeriodIndex, date_range, period_range
class TestPeriodRange(object):
diff --git a/pandas/tests/indexes/period/test_scalar_compat.py b/pandas/tests/indexes/period/test_scalar_compat.py
index a66a81fe99cd4..48c1d5b8dd706 100644
--- a/pandas/tests/indexes/period/test_scalar_compat.py
+++ b/pandas/tests/indexes/period/test_scalar_compat.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
"""Tests for PeriodIndex behaving like a vectorized Period scalar"""
-from pandas import PeriodIndex, date_range, Timedelta
import pandas.util.testing as tm
+from pandas import PeriodIndex, Timedelta, date_range
class TestPeriodIndexOps(object):
diff --git a/pandas/tests/indexes/period/test_setops.py b/pandas/tests/indexes/period/test_setops.py
index 6598e0663fb9a..479104fedcf74 100644
--- a/pandas/tests/indexes/period/test_setops.py
+++ b/pandas/tests/indexes/period/test_setops.py
@@ -1,11 +1,10 @@
-import pytest
-
import numpy as np
+import pytest
import pandas as pd
-import pandas.util.testing as tm
import pandas.core.indexes.period as period
-from pandas import period_range, PeriodIndex, Index, date_range
+import pandas.util.testing as tm
+from pandas import Index, PeriodIndex, date_range, period_range
def _permute(obj):
diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py
index a7bd2f370996b..2919b97da5825 100644
--- a/pandas/tests/indexes/period/test_tools.py
+++ b/pandas/tests/indexes/period/test_tools.py
@@ -1,17 +1,17 @@
-import numpy as np
from datetime import datetime, timedelta
+
+import numpy as np
import pytest
import pandas as pd
-from pandas import Timedelta
-import pandas.util.testing as tm
import pandas.core.indexes.period as period
-from pandas.compat import lrange
-
+import pandas.util.testing as tm
+from pandas import (
+ DatetimeIndex, Period, PeriodIndex, Series, Timedelta, Timestamp,
+ date_range, period_range, to_datetime
+)
from pandas._libs.tslibs.ccalendar import MONTHS
-
-from pandas import (PeriodIndex, Period, DatetimeIndex, Timestamp, Series,
- date_range, to_datetime, period_range)
+from pandas.compat import lrange
class TestPeriodRepresentation(object):
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 921bcda62794d..daebc6e95aac4 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -1,37 +1,34 @@
# -*- coding: utf-8 -*-
-import pytest
-
+import math
+import operator
+from collections import defaultdict
from datetime import datetime, timedelta
from decimal import Decimal
-from collections import defaultdict
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas.core.config as cf
import pandas.util.testing as tm
-from pandas.core.dtypes.generic import ABCIndex
+from pandas import (
+ CategoricalIndex, DataFrame, DatetimeIndex, Float64Index, Int64Index,
+ PeriodIndex, RangeIndex, Series, TimedeltaIndex, UInt64Index, date_range,
+ isna, period_range
+)
+from pandas._libs.tslib import Timestamp
+from pandas.compat import (
+ PY3, PY35, PY36, StringIO, lrange, lzip, range, text_type, u, zip
+)
+from pandas.compat.numpy import np_datetime64_compat
from pandas.core.dtypes.common import is_unsigned_integer_dtype
+from pandas.core.dtypes.generic import ABCIndex
+from pandas.core.index import _get_combined_index, ensure_index_from_sequences
from pandas.core.indexes.api import Index, MultiIndex
+from pandas.core.indexes.datetimes import _to_m8
from pandas.tests.indexes.common import Base
-
-from pandas.compat import (range, lrange, lzip, u,
- text_type, zip, PY3, PY35, PY36, StringIO)
-import math
-import operator
-import numpy as np
-
-from pandas import (period_range, date_range, Series,
- DataFrame, Float64Index, Int64Index, UInt64Index,
- CategoricalIndex, DatetimeIndex, TimedeltaIndex,
- PeriodIndex, RangeIndex, isna)
-from pandas.core.index import _get_combined_index, ensure_index_from_sequences
from pandas.util.testing import assert_almost_equal
-from pandas.compat.numpy import np_datetime64_compat
-
-import pandas.core.config as cf
-
-from pandas.core.indexes.datetimes import _to_m8
-
-import pandas as pd
-from pandas._libs.tslib import Timestamp
class TestIndex(Base):
diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py
index d89baa41d33fe..5c4e4d2417957 100644
--- a/pandas/tests/indexes/test_category.py
+++ b/pandas/tests/indexes/test_category.py
@@ -1,20 +1,19 @@
# -*- coding: utf-8 -*-
-import pytest
import numpy as np
+import pytest
+import pandas as pd
+import pandas.core.config as cf
import pandas.util.testing as tm
-from pandas.core.indexes.api import Index, CategoricalIndex
-from pandas.core.dtypes.dtypes import CategoricalDtype
-from pandas._libs import index as libindex
-from .common import Base
-
-from pandas.compat import range, PY3
-
from pandas import Categorical, IntervalIndex, compat
+from pandas._libs import index as libindex
+from pandas.compat import PY3, range
+from pandas.core.dtypes.dtypes import CategoricalDtype
+from pandas.core.indexes.api import CategoricalIndex, Index
from pandas.util.testing import assert_almost_equal
-import pandas.core.config as cf
-import pandas as pd
+
+from .common import Base
if PY3:
unicode = lambda x: x
diff --git a/pandas/tests/indexes/test_frozen.py b/pandas/tests/indexes/test_frozen.py
index 36d318e7a11aa..bc3e87a4622a7 100644
--- a/pandas/tests/indexes/test_frozen.py
+++ b/pandas/tests/indexes/test_frozen.py
@@ -1,8 +1,9 @@
import numpy as np
-from pandas.util import testing as tm
-from pandas.tests.test_base import CheckImmutable, CheckStringMixin
-from pandas.core.indexes.frozen import FrozenList, FrozenNDArray
+
from pandas.compat import u
+from pandas.core.indexes.frozen import FrozenList, FrozenNDArray
+from pandas.tests.test_base import CheckImmutable, CheckStringMixin
+from pandas.util import testing as tm
class TestFrozenList(CheckImmutable, CheckStringMixin):
diff --git a/setup.cfg b/setup.cfg
index a5006d66868f6..edcae0c87252c 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -208,12 +208,9 @@ skip=
pandas/tests/test_take.py,
pandas/tests/test_nanops.py,
pandas/tests/test_config.py,
- pandas/tests/indexes/test_frozen.py,
- pandas/tests/indexes/test_base.py,
pandas/tests/indexes/test_numeric.py,
pandas/tests/indexes/test_range.py,
pandas/tests/indexes/datetimelike.py,
- pandas/tests/indexes/test_category.py,
pandas/tests/indexes/common.py,
pandas/tests/indexes/conftest.py,
pandas/tests/indexes/datetimes/test_indexing.py,
@@ -256,19 +253,7 @@ skip=
pandas/tests/indexes/multi/conftest.py,
pandas/tests/indexes/multi/test_join.py,
pandas/tests/indexes/multi/test_conversion.py,
- pandas/tests/indexes/period/test_indexing.py,
- pandas/tests/indexes/period/test_construction.py,
- pandas/tests/indexes/period/test_asfreq.py,
- pandas/tests/indexes/period/test_setops.py,
- pandas/tests/indexes/period/test_period.py,
- pandas/tests/indexes/period/test_tools.py,
- pandas/tests/indexes/period/test_period_range.py,
- pandas/tests/indexes/period/test_arithmetic.py,
- pandas/tests/indexes/period/test_astype.py,
- pandas/tests/indexes/period/test_scalar_compat.py,
- pandas/tests/indexes/period/test_partial_slicing.py,
- pandas/tests/indexes/period/test_ops.py,
- pandas/tests/indexes/period/test_formats.py,
+
pandas/tests/indexes/interval/test_construction.py,
pandas/tests/indexes/interval/test_interval_new.py,
pandas/tests/indexes/interval/test_interval.py,
| - [x] partial #23334
- [x] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Run `isort -rc pandas/tests/indexes/`
PR is capped at 20 files. So this is not all the files modified from `pandas/tests/indexes/`. So this is Part Four ( last pr) of this directory `pandas/tests/indexes/`. | https://api.github.com/repos/pandas-dev/pandas/pulls/23343 | 2018-10-25T21:48:11Z | 2018-10-28T02:07:11Z | 2018-10-28T02:07:11Z | 2019-01-02T20:26:21Z |
Fix+Test Timedelta.__mul__(nan) | diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index b5b3abd01328c..41925139e41ae 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1213,6 +1213,12 @@ class Timedelta(_Timedelta):
return other.delta * self
return NotImplemented
+ elif util.is_nan(other):
+ # i.e. np.nan, but also catch np.float64("NaN") which would
+ # otherwise get caught by the hasattr(other, "dtype") branch
+ # incorrectly return a np.timedelta64 object.
+ return NaT
+
elif hasattr(other, 'dtype'):
# ndarray-like
return other * self.to_timedelta64()
@@ -1240,6 +1246,12 @@ class Timedelta(_Timedelta):
# convert to Timedelta below
pass
+ elif util.is_nan(other):
+ # i.e. np.nan, but also catch np.float64("NaN") which would
+ # otherwise get caught by the hasattr(other, "dtype") branch
+ # incorrectly return a np.timedelta64 object.
+ return NaT
+
elif hasattr(other, 'dtype'):
return self.to_timedelta64() / other
diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py
index fce1ef29235cc..65709b0eebaf7 100644
--- a/pandas/tests/scalar/timedelta/test_arithmetic.py
+++ b/pandas/tests/scalar/timedelta/test_arithmetic.py
@@ -277,6 +277,14 @@ def test_td_mul_nat(self, op, td_nat):
with pytest.raises(TypeError):
op(td, td_nat)
+ @pytest.mark.parametrize('nan', [np.nan, np.float64('NaN'), float('nan')])
+ @pytest.mark.parametrize('op', [operator.mul, ops.rmul])
+ def test_td_mul_nan(self, op, nan):
+ # np.float64('NaN') has a 'dtype' attr, avoid treating as array
+ td = Timedelta(10, unit='d')
+ result = op(td, nan)
+ assert result is NaT
+
@pytest.mark.parametrize('op', [operator.mul, ops.rmul])
def test_td_mul_scalar(self, op):
# GH#19738
@@ -328,6 +336,16 @@ def test_td_div_numeric_scalar(self):
assert isinstance(result, Timedelta)
assert result == Timedelta(days=2)
+ @pytest.mark.parametrize('nan', [np.nan, np.float64('NaN'), float('nan')])
+ def test_td_div_nan(self, nan):
+ # np.float64('NaN') has a 'dtype' attr, avoid treating as array
+ td = Timedelta(10, unit='d')
+ result = td / nan
+ assert result is NaT
+
+ result = td // nan
+ assert result is NaT
+
# ---------------------------------------------------------------
# Timedelta.__rdiv__
| Specifically, `np.float64("NAN")` which in master gets caught by the `hasattr(other, "dtype")` branch and incorrectly returns `np.timedelta64("NaT", "ns")` instead of `NaT`
- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/23342 | 2018-10-25T21:46:23Z | 2018-10-28T02:30:26Z | 2018-10-28T02:30:26Z | 2018-10-28T05:12:52Z |
Partialy fix issue #23334 - isort pandas/core/groupby directory | diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index ac84971de08d8..9ef30b8fd021f 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -5,8 +5,9 @@
"""
import types
+
+from pandas.core.dtypes.common import is_list_like, is_scalar
from pandas.util._decorators import make_signature
-from pandas.core.dtypes.common import is_scalar, is_list_like
class GroupByMixin(object):
diff --git a/pandas/core/groupby/categorical.py b/pandas/core/groupby/categorical.py
index e54045884ea93..3e653704bbace 100644
--- a/pandas/core/groupby/categorical.py
+++ b/pandas/core/groupby/categorical.py
@@ -1,7 +1,9 @@
import numpy as np
+
from pandas.core.algorithms import unique1d
from pandas.core.arrays.categorical import (
- _recode_for_categories, CategoricalDtype, Categorical)
+ Categorical, CategoricalDtype, _recode_for_categories
+)
def recode_for_groupby(c, sort, observed):
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 63bf67854e5cd..a832eecf87721 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -7,48 +7,40 @@
"""
import collections
-import warnings
import copy
-from textwrap import dedent
+import warnings
from functools import partial
+from textwrap import dedent
import numpy as np
-from pandas._libs import lib, Timestamp
-from pandas.util._decorators import Substitution, Appender
-from pandas import compat
-
-import pandas.core.indexes.base as ibase
+import pandas.core.algorithms as algorithms
import pandas.core.common as com
-from pandas.core.panel import Panel
+import pandas.core.indexes.base as ibase
+from pandas import compat
+from pandas._libs import Timestamp, lib
from pandas.compat import lzip, map
-
-from pandas.core.series import Series
-from pandas.core.generic import _shared_docs
-from pandas.core.groupby.groupby import (
- GroupBy, _apply_docs, _transform_template)
-from pandas.core.generic import NDFrame
-from pandas.core.groupby import base
+from pandas.compat.numpy import _np_version_under1p13
+from pandas.core.arrays import Categorical
+from pandas.core.base import DataError, SpecificationError
+from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.common import (
- is_scalar,
- is_bool,
- is_datetimelike,
- is_numeric_dtype,
- is_integer_dtype,
- is_interval_dtype,
- ensure_platform_int,
- ensure_int64)
+ ensure_int64, ensure_platform_int, is_bool, is_datetimelike,
+ is_integer_dtype, is_interval_dtype, is_numeric_dtype, is_scalar
+)
from pandas.core.dtypes.missing import isna, notna
-import pandas.core.algorithms as algorithms
from pandas.core.frame import DataFrame
-from pandas.core.dtypes.cast import maybe_downcast_to_dtype
-from pandas.core.base import SpecificationError, DataError
-from pandas.core.index import Index, MultiIndex, CategoricalIndex
-from pandas.core.arrays import Categorical
+from pandas.core.generic import NDFrame, _shared_docs
+from pandas.core.groupby import base
+from pandas.core.groupby.groupby import (
+ GroupBy, _apply_docs, _transform_template
+)
+from pandas.core.index import CategoricalIndex, Index, MultiIndex
from pandas.core.internals import BlockManager, make_block
-from pandas.compat.numpy import _np_version_under1p13
-
+from pandas.core.panel import Panel
+from pandas.core.series import Series
from pandas.plotting._core import boxplot_frame_groupby
+from pandas.util._decorators import Appender, Substitution
class NDFrameGroupBy(GroupBy):
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 025be781d9ee8..5acccbf688e30 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -7,42 +7,36 @@ class providing the base-class of operations.
expose these user-facing objects to provide specific functionailty.
"""
-import types
-from functools import wraps, partial
-import datetime
import collections
+import datetime
+import types
import warnings
from contextlib import contextmanager
+from functools import partial, wraps
import numpy as np
-from pandas._libs import groupby as libgroupby, Timestamp
-from pandas.util._validators import validate_kwargs
-from pandas.util._decorators import (
- cache_readonly, Substitution, Appender)
-
+import pandas.core.algorithms as algorithms
+import pandas.core.common as com
from pandas import compat
-from pandas.compat import zip, range, callable, set_function_name
+from pandas._libs import Timestamp, groupby as libgroupby
+from pandas.compat import callable, range, set_function_name, zip
from pandas.compat.numpy import function as nv
-
-from pandas.core.dtypes.common import (
- is_numeric_dtype,
- is_scalar,
- ensure_float)
+from pandas.core.base import (
+ DataError, GroupByError, PandasObject, SelectionMixin, SpecificationError
+)
+from pandas.core.config import option_context
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
+from pandas.core.dtypes.common import ensure_float, is_numeric_dtype, is_scalar
from pandas.core.dtypes.missing import isna, notna
-
+from pandas.core.frame import DataFrame
+from pandas.core.generic import NDFrame
from pandas.core.groupby import base
-from pandas.core.base import (PandasObject, SelectionMixin, GroupByError,
- DataError, SpecificationError)
from pandas.core.index import Index, MultiIndex
-from pandas.core.generic import NDFrame
-from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
-import pandas.core.common as com
-import pandas.core.algorithms as algorithms
-from pandas.core.config import option_context
+from pandas.util._decorators import Appender, Substitution, cache_readonly
+from pandas.util._validators import validate_kwargs
_doc_template = """
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 1c8fe0e6cadad..cbe87040b8117 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -4,31 +4,25 @@
"""
import warnings
-import numpy as np
-from pandas.util._decorators import cache_readonly
+import numpy as np
+import pandas.core.algorithms as algorithms
+import pandas.core.common as com
from pandas import compat
-from pandas.compat import zip, callable
-
-from pandas.core.dtypes.generic import ABCSeries
-from pandas.core.arrays import ExtensionArray, Categorical
-from pandas.core.index import (
- Index, MultiIndex, CategoricalIndex)
+from pandas.compat import callable, zip
+from pandas.core.arrays import Categorical, ExtensionArray
from pandas.core.dtypes.common import (
- ensure_categorical,
- is_hashable,
- is_list_like,
- is_timedelta64_dtype,
- is_datetime64_dtype,
- is_categorical_dtype,
- is_scalar)
-from pandas.core.series import Series
+ ensure_categorical, is_categorical_dtype, is_datetime64_dtype, is_hashable,
+ is_list_like, is_scalar, is_timedelta64_dtype
+)
+from pandas.core.dtypes.generic import ABCSeries
from pandas.core.frame import DataFrame
-import pandas.core.common as com
from pandas.core.groupby.ops import BaseGrouper
-import pandas.core.algorithms as algorithms
+from pandas.core.index import CategoricalIndex, Index, MultiIndex
+from pandas.core.series import Series
from pandas.io.formats.printing import pprint_thing
+from pandas.util._decorators import cache_readonly
class Grouper(object):
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index b199127ac867b..af22744c4feec 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -6,42 +6,33 @@
are contained *in* the SeriesGroupBy and DataFrameGroupBy objects.
"""
-import copy
import collections
-import numpy as np
-
-from pandas._libs import lib, reduction, NaT, iNaT, groupby as libgroupby
-from pandas.util._decorators import cache_readonly
+import copy
-from pandas.compat import zip, range, lzip
+import numpy as np
+import pandas.core.algorithms as algorithms
+import pandas.core.common as com
+from pandas._libs import NaT, groupby as libgroupby, iNaT, lib, reduction
+from pandas.compat import lzip, range, zip
from pandas.core.base import SelectionMixin
-from pandas.core.dtypes.missing import isna, _maybe_fill
-from pandas.core.index import (
- Index, MultiIndex, ensure_index)
from pandas.core.dtypes.common import (
- ensure_float64,
- ensure_platform_int,
- ensure_int64,
- ensure_int64_or_float64,
- ensure_object,
- needs_i8_conversion,
- is_integer_dtype,
- is_complex_dtype,
- is_bool_dtype,
- is_numeric_dtype,
- is_timedelta64_dtype,
- is_datetime64_any_dtype,
- is_categorical_dtype)
-from pandas.core.series import Series
+ ensure_float64, ensure_int64, ensure_int64_or_float64, ensure_object,
+ ensure_platform_int, is_bool_dtype, is_categorical_dtype, is_complex_dtype,
+ is_datetime64_any_dtype, is_integer_dtype, is_numeric_dtype,
+ is_timedelta64_dtype, needs_i8_conversion
+)
+from pandas.core.dtypes.missing import _maybe_fill, isna
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
-import pandas.core.common as com
from pandas.core.groupby import base
-from pandas.core.sorting import (get_group_index_sorter, get_group_index,
- compress_group_index, get_flattened_iterator,
- decons_obs_group_ids, get_indexer_dict)
-import pandas.core.algorithms as algorithms
+from pandas.core.index import Index, MultiIndex, ensure_index
+from pandas.core.series import Series
+from pandas.core.sorting import (
+ compress_group_index, decons_obs_group_ids, get_flattened_iterator,
+ get_group_index, get_group_index_sorter, get_indexer_dict
+)
+from pandas.util._decorators import cache_readonly
def generate_bins_generic(values, binner, closed):
diff --git a/setup.cfg b/setup.cfg
index ee39844996025..9eee370dc9f1f 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -182,12 +182,6 @@ skip=
pandas/core/internals/concat.py,
pandas/core/internals/managers.py,
pandas/core/internals/blocks.py,
- pandas/core/groupby/ops.py,
- pandas/core/groupby/categorical.py,
- pandas/core/groupby/generic.py,
- pandas/core/groupby/groupby.py,
- pandas/core/groupby/grouper.py,
- pandas/core/groupby/base.py,
pandas/core/reshape/concat.py,
pandas/core/reshape/tile.py,
pandas/core/reshape/melt.py,
| The imports have been sorted with isort in the pandas/core/groupby directory.
- [x] partially 23334
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/23341 | 2018-10-25T21:45:48Z | 2018-10-26T01:51:07Z | 2018-10-26T01:51:07Z | 2018-10-26T17:12:20Z |
Run Isort on tests/indexes part three | diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index e8eaca2b61dd7..bec969020559c 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -1,24 +1,21 @@
# -*- coding: utf-8 -*-
+import numpy as np
import pytest
-from pandas import compat
+import pandas as pd
+import pandas.util.testing as tm
+from pandas import (
+ CategoricalIndex, DatetimeIndex, Float64Index, Index, Int64Index,
+ IntervalIndex, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex,
+ UInt64Index, compat, isna
+)
+from pandas._libs.tslib import iNaT
from pandas.compat import PY3
-
-import numpy as np
-
-from pandas import (Series, Index, Float64Index, Int64Index, UInt64Index,
- RangeIndex, MultiIndex, CategoricalIndex, DatetimeIndex,
- TimedeltaIndex, PeriodIndex, IntervalIndex, isna)
-from pandas.core.indexes.base import InvalidIndexError
-from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas.core.dtypes.common import needs_i8_conversion
from pandas.core.dtypes.dtypes import CategoricalDtype
-from pandas._libs.tslib import iNaT
-
-import pandas.util.testing as tm
-
-import pandas as pd
+from pandas.core.indexes.base import InvalidIndexError
+from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
class Base(object):
diff --git a/pandas/tests/indexes/conftest.py b/pandas/tests/indexes/conftest.py
index 6d88ef0cfa6c5..4a2a4a7deb5cc 100644
--- a/pandas/tests/indexes/conftest.py
+++ b/pandas/tests/indexes/conftest.py
@@ -1,10 +1,10 @@
-import pytest
import numpy as np
-import pandas as pd
+import pytest
+import pandas as pd
import pandas.util.testing as tm
+from pandas.compat import long, lzip
from pandas.core.indexes.api import Index, MultiIndex
-from pandas.compat import lzip, long
@pytest.fixture(params=[tm.makeUnicodeIndex(100),
diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py
index 7af8b259fa137..bb51b47a7fd0a 100644
--- a/pandas/tests/indexes/datetimelike.py
+++ b/pandas/tests/indexes/datetimelike.py
@@ -1,10 +1,12 @@
""" generic datetimelike tests """
-import pytest
import numpy as np
+import pytest
+
import pandas as pd
-from .common import Base
import pandas.util.testing as tm
+from .common import Base
+
class DatetimeLike(Base):
diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py
index 4feed589f5961..038c4f786f69f 100644
--- a/pandas/tests/indexes/datetimes/test_arithmetic.py
+++ b/pandas/tests/indexes/datetimes/test_arithmetic.py
@@ -6,8 +6,8 @@
import pandas as pd
import pandas.util.testing as tm
+from pandas import DatetimeIndex, Series, date_range
from pandas.errors import NullFrequencyError
-from pandas import Series, DatetimeIndex, date_range
class TestDatetimeIndexArithmetic(object):
diff --git a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py
index be22d80a862e1..8895624d74e89 100644
--- a/pandas/tests/indexes/datetimes/test_astype.py
+++ b/pandas/tests/indexes/datetimes/test_astype.py
@@ -1,16 +1,17 @@
-import pytest
+from datetime import datetime
-import pytz
import dateutil
import numpy as np
-
-from datetime import datetime
+import pytest
+import pytz
from dateutil.tz import tzlocal
import pandas as pd
import pandas.util.testing as tm
-from pandas import (DatetimeIndex, date_range, Series, NaT, Index, Timestamp,
- Int64Index, Period)
+from pandas import (
+ DatetimeIndex, Index, Int64Index, NaT, Period, Series, Timestamp,
+ date_range
+)
class TestDatetimeIndex(object):
diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py
index b6f27cbdd1b89..2fc3e29a47b20 100644
--- a/pandas/tests/indexes/datetimes/test_construction.py
+++ b/pandas/tests/indexes/datetimes/test_construction.py
@@ -1,18 +1,19 @@
from datetime import timedelta
-from operator import attrgetter
from functools import partial
+from operator import attrgetter
+import numpy as np
import pytest
import pytz
-import numpy as np
import pandas as pd
-from pandas import offsets
import pandas.util.testing as tm
+from pandas import (
+ DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
+ to_datetime
+)
from pandas._libs.tslib import OutOfBoundsDatetime
from pandas._libs.tslibs import conversion
-from pandas import (DatetimeIndex, Index, Timestamp, datetime, date_range,
- to_datetime)
class TestDatetimeIndex(object):
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index c05c80df29dac..657d8ffe0cd38 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -2,22 +2,23 @@
test date_range, bdate_range construction from the convenience range functions
"""
-import pytest
+from datetime import datetime, time, timedelta
import numpy as np
+import pytest
import pytz
from pytz import timezone
-from datetime import datetime, timedelta, time
import pandas as pd
-import pandas.util.testing as tm
import pandas.util._test_decorators as td
-from pandas import compat
-from pandas import date_range, bdate_range, offsets, DatetimeIndex, Timestamp
-from pandas.tseries.offsets import (generate_range, CDay, BDay, DateOffset,
- MonthEnd, prefix_mapping)
-
+import pandas.util.testing as tm
+from pandas import (
+ DatetimeIndex, Timestamp, bdate_range, compat, date_range, offsets
+)
from pandas.tests.series.common import TestData
+from pandas.tseries.offsets import (
+ BDay, CDay, DateOffset, MonthEnd, generate_range, prefix_mapping
+)
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py
index 5ab32ee3863ae..84214b331e8af 100644
--- a/pandas/tests/indexes/datetimes/test_datetime.py
+++ b/pandas/tests/indexes/datetimes/test_datetime.py
@@ -1,17 +1,16 @@
import sys
-
-import pytest
-
-import numpy as np
from datetime import date
import dateutil
+import numpy as np
+import pytest
+
import pandas as pd
import pandas.util.testing as tm
+from pandas import (
+ DataFrame, DatetimeIndex, Index, Timestamp, date_range, offsets
+)
from pandas.compat import lrange
-from pandas import (DatetimeIndex, Index, date_range, DataFrame,
- Timestamp, offsets)
-
from pandas.util.testing import assert_almost_equal
randn = np.random.randn
diff --git a/pandas/tests/indexes/datetimes/test_datetimelike.py b/pandas/tests/indexes/datetimes/test_datetimelike.py
index c6b3a77773dc7..f095e0a06c34e 100644
--- a/pandas/tests/indexes/datetimes/test_datetimelike.py
+++ b/pandas/tests/indexes/datetimes/test_datetimelike.py
@@ -1,7 +1,7 @@
""" generic tests from the Datetimelike class """
-from pandas.util import testing as tm
from pandas import DatetimeIndex, date_range
+from pandas.util import testing as tm
from ..datetimelike import DatetimeLike
diff --git a/pandas/tests/indexes/datetimes/test_formats.py b/pandas/tests/indexes/datetimes/test_formats.py
index 63d5338d88d76..4279c582e9777 100644
--- a/pandas/tests/indexes/datetimes/test_formats.py
+++ b/pandas/tests/indexes/datetimes/test_formats.py
@@ -1,13 +1,13 @@
from datetime import datetime
-from pandas import DatetimeIndex, Series
-import numpy as np
import dateutil.tz
-import pytz
+import numpy as np
import pytest
+import pytz
-import pandas.util.testing as tm
import pandas as pd
+import pandas.util.testing as tm
+from pandas import DatetimeIndex, Series
def test_to_native_types():
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py
index 601a7b13e370a..cb2b0dfe5d11c 100644
--- a/pandas/tests/indexes/datetimes/test_indexing.py
+++ b/pandas/tests/indexes/datetimes/test_indexing.py
@@ -1,13 +1,14 @@
-from datetime import datetime, timedelta, time
-import pytest
+from datetime import datetime, time, timedelta
-import pytz
import numpy as np
+import pytest
+import pytz
+
import pandas as pd
-import pandas.util.testing as tm
import pandas.compat as compat
-from pandas import notna, Index, DatetimeIndex, date_range, Timestamp
-from pandas.tseries.offsets import CDay, BDay
+import pandas.util.testing as tm
+from pandas import DatetimeIndex, Index, Timestamp, date_range, notna
+from pandas.tseries.offsets import BDay, CDay
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py
index 743cbc107cce5..00d816459626b 100644
--- a/pandas/tests/indexes/datetimes/test_misc.py
+++ b/pandas/tests/indexes/datetimes/test_misc.py
@@ -1,14 +1,15 @@
-import locale
import calendar
+import locale
import unicodedata
+import numpy as np
import pytest
-import numpy as np
import pandas as pd
import pandas.util.testing as tm
-from pandas import (Index, DatetimeIndex, datetime, offsets,
- date_range, Timestamp, compat)
+from pandas import (
+ DatetimeIndex, Index, Timestamp, compat, date_range, datetime, offsets
+)
class TestTimeSeries(object):
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index ad44ceab36bc3..f06291a7e4e9e 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -1,17 +1,19 @@
-import pytest
import warnings
-import numpy as np
from datetime import datetime
+import numpy as np
+import pytest
+
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
-from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp,
- date_range, bdate_range, Index)
-from pandas.tseries.offsets import BMonthEnd, CDay, BDay, Day, Hour
-from pandas.tests.test_base import Ops
+from pandas import (
+ DatetimeIndex, Index, PeriodIndex, Series, Timestamp, bdate_range,
+ date_range
+)
from pandas.core.dtypes.generic import ABCDateOffset
-
+from pandas.tests.test_base import Ops
+from pandas.tseries.offsets import BDay, BMonthEnd, CDay, Day, Hour
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py
index e1e80e50e31f0..ae50ccedd7917 100644
--- a/pandas/tests/indexes/datetimes/test_partial_slicing.py
+++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py
@@ -1,17 +1,17 @@
""" test partial slicing on Series/Frame """
-import pytest
-
-from datetime import datetime
-import numpy as np
-import pandas as pd
import operator as op
+from datetime import datetime
-from pandas import (DatetimeIndex, Series, DataFrame,
- date_range, Index, Timedelta, Timestamp)
-from pandas.util import testing as tm
+import numpy as np
+import pytest
+import pandas as pd
+from pandas import (
+ DataFrame, DatetimeIndex, Index, Series, Timedelta, Timestamp, date_range
+)
from pandas.core.indexing import IndexingError
+from pandas.util import testing as tm
class TestSlicing(object):
diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py
index d054121c6dfab..dac2df520c8e4 100644
--- a/pandas/tests/indexes/datetimes/test_scalar_compat.py
+++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py
@@ -7,10 +7,9 @@
import numpy as np
import pytest
-import pandas.util.testing as tm
import pandas as pd
-
-from pandas import date_range, Timestamp, DatetimeIndex
+import pandas.util.testing as tm
+from pandas import DatetimeIndex, Timestamp, date_range
from pandas.tseries.frequencies import to_offset
diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py
index cb9364edc0cc3..896d1cfd0094c 100644
--- a/pandas/tests/indexes/datetimes/test_setops.py
+++ b/pandas/tests/indexes/datetimes/test_setops.py
@@ -1,14 +1,16 @@
from datetime import datetime
-import pytest
import numpy as np
+import pytest
import pandas as pd
-import pandas.util.testing as tm
import pandas.util._test_decorators as td
-from pandas import (DatetimeIndex, date_range, Series, bdate_range, DataFrame,
- Int64Index, Index, to_datetime)
-from pandas.tseries.offsets import Minute, BMonthEnd, MonthEnd
+import pandas.util.testing as tm
+from pandas import (
+ DataFrame, DatetimeIndex, Index, Int64Index, Series, bdate_range,
+ date_range, to_datetime
+)
+from pandas.tseries.offsets import BMonthEnd, Minute, MonthEnd
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py
index 1369783657f92..6309206f48b1a 100644
--- a/pandas/tests/indexes/datetimes/test_timezones.py
+++ b/pandas/tests/indexes/datetimes/test_timezones.py
@@ -2,23 +2,24 @@
"""
Tests for DatetimeIndex timezone-related methods
"""
-from datetime import datetime, timedelta, tzinfo, date, time
+from datetime import date, datetime, time, timedelta, tzinfo
from distutils.version import LooseVersion
+import dateutil
+import numpy as np
import pytest
import pytz
-import dateutil
from dateutil.tz import gettz, tzlocal
-import numpy as np
-
-import pandas.util.testing as tm
-import pandas.util._test_decorators as td
import pandas as pd
-from pandas._libs.tslibs import timezones, conversion
-from pandas.compat import lrange, zip, PY3
-from pandas import (DatetimeIndex, date_range, bdate_range,
- Timestamp, isna, to_datetime, Index)
+import pandas.util._test_decorators as td
+import pandas.util.testing as tm
+from pandas import (
+ DatetimeIndex, Index, Timestamp, bdate_range, date_range, isna,
+ to_datetime
+)
+from pandas._libs.tslibs import conversion, timezones
+from pandas.compat import PY3, lrange, zip
class FixedOffset(tzinfo):
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index 74703e2837c4a..428888a0e366d 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -1,29 +1,31 @@
""" test to_datetime """
-import pytz
-import pytest
-import locale
import calendar
+import locale
+from datetime import datetime, time
+from distutils.version import LooseVersion
+
import dateutil
import numpy as np
+import pytest
+import pytz
from dateutil.parser import parse
from dateutil.tz.tz import tzoffset
-from datetime import datetime, time
-from distutils.version import LooseVersion
import pandas as pd
+import pandas.util._test_decorators as td
+from pandas import (
+ DataFrame, DatetimeIndex, Index, NaT, Series, Timestamp, compat,
+ date_range, isna, to_datetime
+)
from pandas._libs import tslib
from pandas._libs.tslibs import parsing
+from pandas.compat import PY3, lmap
+from pandas.core.dtypes.common import is_datetime64_ns_dtype
from pandas.core.tools import datetimes as tools
-
from pandas.errors import OutOfBoundsDatetime
-from pandas.compat import lmap, PY3
-from pandas.core.dtypes.common import is_datetime64_ns_dtype
from pandas.util import testing as tm
-import pandas.util._test_decorators as td
from pandas.util.testing import assert_series_equal
-from pandas import (isna, to_datetime, Timestamp, Series, DataFrame,
- Index, DatetimeIndex, NaT, date_range, compat)
class TestTimeConversionFormats(object):
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py
index 186f00cfe7426..6f151134bd24f 100644
--- a/pandas/tests/indexes/test_numeric.py
+++ b/pandas/tests/indexes/test_numeric.py
@@ -1,20 +1,15 @@
# -*- coding: utf-8 -*-
-import pytest
-
from datetime import datetime
-from pandas.compat import range
import numpy as np
-
-from pandas import (Series, Index, Float64Index,
- Int64Index, UInt64Index)
-
-import pandas.util.testing as tm
+import pytest
import pandas as pd
+import pandas.util.testing as tm
+from pandas import Float64Index, Index, Int64Index, Series, UInt64Index
from pandas._libs.tslibs import Timestamp
-
+from pandas.compat import range
from pandas.tests.indexes.common import Base
diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py
index 0e47fcd52f625..4bd66c070e731 100644
--- a/pandas/tests/indexes/test_range.py
+++ b/pandas/tests/indexes/test_range.py
@@ -1,21 +1,16 @@
# -*- coding: utf-8 -*-
-import pytest
-
+import operator
from datetime import datetime
from itertools import combinations
-import operator
-
-from pandas.compat import range, u, PY3
import numpy as np
-
-from pandas import (isna, Series, Index, Float64Index,
- Int64Index, RangeIndex)
-
-import pandas.util.testing as tm
+import pytest
import pandas as pd
+import pandas.util.testing as tm
+from pandas import Float64Index, Index, Int64Index, RangeIndex, Series, isna
+from pandas.compat import PY3, range, u
from .test_numeric import Numeric
diff --git a/setup.cfg b/setup.cfg
index 6fd1f3fd141d3..83ec8c6a186e1 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -208,27 +208,10 @@ skip=
pandas/tests/test_take.py,
pandas/tests/test_nanops.py,
pandas/tests/test_config.py,
- pandas/tests/indexes/test_numeric.py,
- pandas/tests/indexes/test_range.py,
- pandas/tests/indexes/datetimelike.py,
- pandas/tests/indexes/common.py,
- pandas/tests/indexes/conftest.py,
- pandas/tests/indexes/datetimes/test_indexing.py,
- pandas/tests/indexes/datetimes/test_construction.py,
- pandas/tests/indexes/datetimes/test_datetimelike.py,
- pandas/tests/indexes/datetimes/test_setops.py,
- pandas/tests/indexes/datetimes/test_timezones.py,
- pandas/tests/indexes/datetimes/test_datetime.py,
- pandas/tests/indexes/datetimes/test_tools.py,
- pandas/tests/indexes/datetimes/test_arithmetic.py,
- pandas/tests/indexes/datetimes/test_astype.py,
- pandas/tests/indexes/datetimes/test_date_range.py,
- pandas/tests/indexes/datetimes/test_misc.py,
- pandas/tests/indexes/datetimes/test_scalar_compat.py,
- pandas/tests/indexes/datetimes/test_partial_slicing.py,
+ pandas/tests/indexes/test_frozen.py,
+ pandas/tests/indexes/test_base.py,
+ pandas/tests/indexes/test_category.py,
pandas/tests/indexes/datetimes/test_missing.py,
- pandas/tests/indexes/datetimes/test_ops.py,
- pandas/tests/indexes/datetimes/test_formats.py,
pandas/tests/indexes/multi/test_duplicates.py,
pandas/tests/indexes/multi/test_partial_indexing.py,
pandas/tests/indexes/multi/test_indexing.py,
| - [x] partial #23334
- [x] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Run `isort -rc pandas/tests/indexes/`
PR is capped at 20 files. So this is not all the files modified from `pandas/tests/indexes/`. So this is Part Three of this directory `pandas/tests/indexes/`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/23340 | 2018-10-25T21:42:09Z | 2018-10-28T02:19:25Z | 2018-10-28T02:19:25Z | 2019-01-02T20:26:19Z |
Fix import format tests/indexes/multi Part Two | diff --git a/pandas/tests/indexes/interval/test_astype.py b/pandas/tests/indexes/interval/test_astype.py
index 6bbc938c346f7..4389a22641b72 100644
--- a/pandas/tests/indexes/interval/test_astype.py
+++ b/pandas/tests/indexes/interval/test_astype.py
@@ -1,17 +1,14 @@
from __future__ import division
-import pytest
import numpy as np
+import pytest
+
+import pandas.util.testing as tm
from pandas import (
- Index,
- IntervalIndex,
- interval_range,
- CategoricalIndex,
- Timestamp,
- Timedelta,
- NaT)
+ CategoricalIndex, Index, IntervalIndex, NaT, Timedelta, Timestamp,
+ interval_range
+)
from pandas.core.dtypes.dtypes import CategoricalDtype, IntervalDtype
-import pandas.util.testing as tm
class Base(object):
diff --git a/pandas/tests/indexes/interval/test_construction.py b/pandas/tests/indexes/interval/test_construction.py
index 208d498180692..a937dbc40a843 100644
--- a/pandas/tests/indexes/interval/test_construction.py
+++ b/pandas/tests/indexes/interval/test_construction.py
@@ -1,18 +1,20 @@
from __future__ import division
-import pytest
-import numpy as np
from functools import partial
+import numpy as np
+import pytest
+
+import pandas.core.common as com
+import pandas.util.testing as tm
from pandas import (
- Interval, IntervalIndex, Index, Int64Index, Float64Index, Categorical,
- CategoricalIndex, date_range, timedelta_range, period_range, notna)
+ Categorical, CategoricalIndex, Float64Index, Index, Int64Index, Interval,
+ IntervalIndex, date_range, notna, period_range, timedelta_range
+)
from pandas.compat import lzip
from pandas.core.arrays import IntervalArray
from pandas.core.dtypes.common import is_categorical_dtype
from pandas.core.dtypes.dtypes import IntervalDtype
-import pandas.core.common as com
-import pandas.util.testing as tm
@pytest.fixture(params=[None, 'foo'])
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index f6ed658251dc7..ac0446373a6a1 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -1,17 +1,20 @@
from __future__ import division
+import re
from itertools import permutations
-import pytest
+
import numpy as np
-import re
+import pytest
+
+import pandas as pd
+import pandas.core.common as com
+import pandas.util.testing as tm
from pandas import (
- Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
- Timedelta, date_range, timedelta_range)
+ Index, Interval, IntervalIndex, Timedelta, Timestamp, date_range,
+ interval_range, isna, notna, timedelta_range
+)
from pandas.compat import lzip
-import pandas.core.common as com
from pandas.tests.indexes.common import Base
-import pandas.util.testing as tm
-import pandas as pd
@pytest.fixture(scope='class', params=[None, 'foo'])
diff --git a/pandas/tests/indexes/interval/test_interval_new.py b/pandas/tests/indexes/interval/test_interval_new.py
index 02801f6bbc7ee..80905e13e9525 100644
--- a/pandas/tests/indexes/interval/test_interval_new.py
+++ b/pandas/tests/indexes/interval/test_interval_new.py
@@ -1,11 +1,10 @@
from __future__ import division
-import pytest
import numpy as np
+import pytest
-from pandas import Interval, IntervalIndex, Int64Index
import pandas.util.testing as tm
-
+from pandas import Int64Index, Interval, IntervalIndex
pytestmark = pytest.mark.skip(reason="new indexing tests for issue 16316")
diff --git a/pandas/tests/indexes/interval/test_interval_range.py b/pandas/tests/indexes/interval/test_interval_range.py
index 447856e7e9d51..9e11c357c075d 100644
--- a/pandas/tests/indexes/interval/test_interval_range.py
+++ b/pandas/tests/indexes/interval/test_interval_range.py
@@ -1,14 +1,17 @@
from __future__ import division
-import pytest
-import numpy as np
from datetime import timedelta
+
+import numpy as np
+import pytest
+
+import pandas.util.testing as tm
from pandas import (
- Interval, IntervalIndex, Timestamp, Timedelta, DateOffset,
- interval_range, date_range, timedelta_range)
+ DateOffset, Interval, IntervalIndex, Timedelta, Timestamp, date_range,
+ interval_range, timedelta_range
+)
from pandas.core.dtypes.common import is_integer
from pandas.tseries.offsets import Day
-import pandas.util.testing as tm
@pytest.fixture(scope='class', params=[None, 'foo'])
diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py
index 5f248bf7725e5..11d2d0283cdcf 100644
--- a/pandas/tests/indexes/interval/test_interval_tree.py
+++ b/pandas/tests/indexes/interval/test_interval_tree.py
@@ -1,10 +1,11 @@
from __future__ import division
-import pytest
import numpy as np
+import pytest
+
+import pandas.util.testing as tm
from pandas import compat
from pandas._libs.interval import IntervalTree
-import pandas.util.testing as tm
@pytest.fixture(
diff --git a/pandas/tests/indexes/multi/conftest.py b/pandas/tests/indexes/multi/conftest.py
index afe651d22c6a7..9fad4547648d5 100644
--- a/pandas/tests/indexes/multi/conftest.py
+++ b/pandas/tests/indexes/multi/conftest.py
@@ -2,6 +2,7 @@
import numpy as np
import pytest
+
from pandas import Index, MultiIndex
diff --git a/pandas/tests/indexes/multi/test_analytics.py b/pandas/tests/indexes/multi/test_analytics.py
index 9f6a72f803f9d..8d602b0bb2b1d 100644
--- a/pandas/tests/indexes/multi/test_analytics.py
+++ b/pandas/tests/indexes/multi/test_analytics.py
@@ -1,9 +1,10 @@
# -*- coding: utf-8 -*-
import numpy as np
+import pytest
+
import pandas as pd
import pandas.util.testing as tm
-import pytest
from pandas import Index, MultiIndex, date_range, period_range
from pandas.compat import lrange
diff --git a/pandas/tests/indexes/multi/test_astype.py b/pandas/tests/indexes/multi/test_astype.py
index e0e23609290e5..5da96717bc077 100644
--- a/pandas/tests/indexes/multi/test_astype.py
+++ b/pandas/tests/indexes/multi/test_astype.py
@@ -1,10 +1,11 @@
# -*- coding: utf-8 -*-
import numpy as np
-import pandas.util.testing as tm
import pytest
-from pandas.util.testing import assert_copy
+
+import pandas.util.testing as tm
from pandas.core.dtypes.dtypes import CategoricalDtype
+from pandas.util.testing import assert_copy
def test_astype(idx):
diff --git a/pandas/tests/indexes/multi/test_compat.py b/pandas/tests/indexes/multi/test_compat.py
index 0dfe322c2eef9..ef3f7ddbbf81d 100644
--- a/pandas/tests/indexes/multi/test_compat.py
+++ b/pandas/tests/indexes/multi/test_compat.py
@@ -2,8 +2,9 @@
import numpy as np
-import pandas.util.testing as tm
import pytest
+
+import pandas.util.testing as tm
from pandas import MultiIndex
from pandas.compat import PY3, long
diff --git a/pandas/tests/indexes/multi/test_constructor.py b/pandas/tests/indexes/multi/test_constructor.py
index ab2e4c1d863a7..ca9b2766f3798 100644
--- a/pandas/tests/indexes/multi/test_constructor.py
+++ b/pandas/tests/indexes/multi/test_constructor.py
@@ -3,9 +3,10 @@
import re
import numpy as np
+import pytest
+
import pandas as pd
import pandas.util.testing as tm
-import pytest
from pandas import Index, MultiIndex, date_range
from pandas._libs.tslib import Timestamp
from pandas.compat import lrange, range
diff --git a/pandas/tests/indexes/multi/test_contains.py b/pandas/tests/indexes/multi/test_contains.py
index 7b91a1d14d7e8..c846af3e7cfbe 100644
--- a/pandas/tests/indexes/multi/test_contains.py
+++ b/pandas/tests/indexes/multi/test_contains.py
@@ -1,9 +1,10 @@
# -*- coding: utf-8 -*-
import numpy as np
+import pytest
+
import pandas as pd
import pandas.util.testing as tm
-import pytest
from pandas import MultiIndex
from pandas.compat import PYPY
diff --git a/pandas/tests/indexes/multi/test_conversion.py b/pandas/tests/indexes/multi/test_conversion.py
index 8c9566b7e651f..1daccefcfe876 100644
--- a/pandas/tests/indexes/multi/test_conversion.py
+++ b/pandas/tests/indexes/multi/test_conversion.py
@@ -2,6 +2,7 @@
import numpy as np
+
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, MultiIndex, date_range
diff --git a/pandas/tests/indexes/multi/test_copy.py b/pandas/tests/indexes/multi/test_copy.py
index 786b90e8f13a2..99a5bcc1c217b 100644
--- a/pandas/tests/indexes/multi/test_copy.py
+++ b/pandas/tests/indexes/multi/test_copy.py
@@ -2,8 +2,9 @@
from copy import copy, deepcopy
-import pandas.util.testing as tm
import pytest
+
+import pandas.util.testing as tm
from pandas import MultiIndex
diff --git a/pandas/tests/indexes/multi/test_drop.py b/pandas/tests/indexes/multi/test_drop.py
index 281db7fd2c8a7..a692b510c569c 100644
--- a/pandas/tests/indexes/multi/test_drop.py
+++ b/pandas/tests/indexes/multi/test_drop.py
@@ -2,9 +2,10 @@
import numpy as np
+import pytest
+
import pandas as pd
import pandas.util.testing as tm
-import pytest
from pandas import Index, MultiIndex
from pandas.compat import lrange
from pandas.errors import PerformanceWarning
diff --git a/pandas/tests/indexes/multi/test_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py
index dfc9f329e0753..4336d891adcdc 100644
--- a/pandas/tests/indexes/multi/test_duplicates.py
+++ b/pandas/tests/indexes/multi/test_duplicates.py
@@ -1,14 +1,14 @@
# -*- coding: utf-8 -*-
from itertools import product
-import pytest
import numpy as np
+import pytest
-from pandas.compat import range, u
-from pandas import MultiIndex, DatetimeIndex
-from pandas._libs import hashtable
import pandas.util.testing as tm
+from pandas import DatetimeIndex, MultiIndex
+from pandas._libs import hashtable
+from pandas.compat import range, u
@pytest.mark.parametrize('names', [None, ['first', 'second']])
diff --git a/pandas/tests/indexes/multi/test_equivalence.py b/pandas/tests/indexes/multi/test_equivalence.py
index 7770ee96bbfb3..e3af327ededa2 100644
--- a/pandas/tests/indexes/multi/test_equivalence.py
+++ b/pandas/tests/indexes/multi/test_equivalence.py
@@ -2,6 +2,7 @@
import numpy as np
+
import pandas as pd
import pandas.util.testing as tm
from pandas import Index, MultiIndex, Series
diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py
index ff2170839b012..cac32ce7040b4 100644
--- a/pandas/tests/indexes/multi/test_get_set.py
+++ b/pandas/tests/indexes/multi/test_get_set.py
@@ -2,9 +2,10 @@
import numpy as np
+import pytest
+
import pandas as pd
import pandas.util.testing as tm
-import pytest
from pandas import CategoricalIndex, Index, MultiIndex
from pandas.compat import range
diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py
index 9ec11f1f42b9a..ec83ae4766ab0 100644
--- a/pandas/tests/indexes/multi/test_indexing.py
+++ b/pandas/tests/indexes/multi/test_indexing.py
@@ -8,8 +8,10 @@
import pandas as pd
import pandas.util.testing as tm
-from pandas import (Categorical, CategoricalIndex, Index, IntervalIndex,
- MultiIndex, date_range)
+from pandas import (
+ Categorical, CategoricalIndex, Index, IntervalIndex, MultiIndex,
+ date_range
+)
from pandas.compat import lrange
from pandas.core.indexes.base import InvalidIndexError
from pandas.util.testing import assert_almost_equal
diff --git a/pandas/tests/indexes/multi/test_integrity.py b/pandas/tests/indexes/multi/test_integrity.py
index a2401035c80f8..4d08fa7cef7a4 100644
--- a/pandas/tests/indexes/multi/test_integrity.py
+++ b/pandas/tests/indexes/multi/test_integrity.py
@@ -3,9 +3,10 @@
import re
import numpy as np
+import pytest
+
import pandas as pd
import pandas.util.testing as tm
-import pytest
from pandas import IntervalIndex, MultiIndex, RangeIndex
from pandas.compat import lrange, range
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
diff --git a/pandas/tests/indexes/multi/test_join.py b/pandas/tests/indexes/multi/test_join.py
index ac3958956bae7..8d89ad9f1cd0c 100644
--- a/pandas/tests/indexes/multi/test_join.py
+++ b/pandas/tests/indexes/multi/test_join.py
@@ -2,9 +2,10 @@
import numpy as np
+import pytest
+
import pandas as pd
import pandas.util.testing as tm
-import pytest
from pandas import Index, MultiIndex
diff --git a/pandas/tests/indexes/multi/test_missing.py b/pandas/tests/indexes/multi/test_missing.py
index 82c486caf2631..7a91ac6d96220 100644
--- a/pandas/tests/indexes/multi/test_missing.py
+++ b/pandas/tests/indexes/multi/test_missing.py
@@ -1,9 +1,10 @@
# -*- coding: utf-8 -*-
import numpy as np
+import pytest
+
import pandas as pd
import pandas.util.testing as tm
-import pytest
from pandas import Int64Index, MultiIndex, PeriodIndex, UInt64Index
from pandas._libs.tslib import iNaT
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
diff --git a/pandas/tests/indexes/multi/test_monotonic.py b/pandas/tests/indexes/multi/test_monotonic.py
index b9492794c479b..a854035b37544 100644
--- a/pandas/tests/indexes/multi/test_monotonic.py
+++ b/pandas/tests/indexes/multi/test_monotonic.py
@@ -1,8 +1,9 @@
# -*- coding: utf-8 -*-
import numpy as np
-import pandas as pd
import pytest
+
+import pandas as pd
from pandas import Index, IntervalIndex, MultiIndex
diff --git a/pandas/tests/indexes/multi/test_reindex.py b/pandas/tests/indexes/multi/test_reindex.py
index be1f430140a09..f7651ac258d48 100644
--- a/pandas/tests/indexes/multi/test_reindex.py
+++ b/pandas/tests/indexes/multi/test_reindex.py
@@ -2,6 +2,7 @@
import numpy as np
+
import pandas as pd
import pandas.util.testing as tm
from pandas import Index, MultiIndex
diff --git a/pandas/tests/indexes/multi/test_set_ops.py b/pandas/tests/indexes/multi/test_set_ops.py
index 3f61cf2b6ff3f..46d7a27e02aec 100644
--- a/pandas/tests/indexes/multi/test_set_ops.py
+++ b/pandas/tests/indexes/multi/test_set_ops.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
import numpy as np
+
import pandas as pd
import pandas.util.testing as tm
from pandas import MultiIndex, Series
diff --git a/pandas/tests/indexes/multi/test_sorting.py b/pandas/tests/indexes/multi/test_sorting.py
index ee29ea1be8aea..b2f12405eb195 100644
--- a/pandas/tests/indexes/multi/test_sorting.py
+++ b/pandas/tests/indexes/multi/test_sorting.py
@@ -1,8 +1,9 @@
# -*- coding: utf-8 -*-
import numpy as np
+import pytest
+
import pandas as pd
import pandas.util.testing as tm
-import pytest
from pandas import CategoricalIndex, DataFrame, Index, MultiIndex, RangeIndex
from pandas.compat import lrange
from pandas.errors import PerformanceWarning, UnsortedIndexError
diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py
index e425937fedf4b..82654a3533132 100644
--- a/pandas/tests/indexes/timedeltas/test_arithmetic.py
+++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py
@@ -1,16 +1,17 @@
# -*- coding: utf-8 -*-
-import pytest
-import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
+import numpy as np
+import pytest
+
import pandas as pd
import pandas.util.testing as tm
-from pandas import (DatetimeIndex, TimedeltaIndex, Int64Index,
- timedelta_range, date_range,
- Series,
- Timestamp, Timedelta)
+from pandas import (
+ DatetimeIndex, Int64Index, Series, Timedelta, TimedeltaIndex, Timestamp,
+ date_range, timedelta_range
+)
from pandas.errors import NullFrequencyError
diff --git a/pandas/tests/indexes/timedeltas/test_astype.py b/pandas/tests/indexes/timedeltas/test_astype.py
index 329f0c2467e8b..54f1ac601fd69 100644
--- a/pandas/tests/indexes/timedeltas/test_astype.py
+++ b/pandas/tests/indexes/timedeltas/test_astype.py
@@ -1,12 +1,13 @@
from datetime import timedelta
-import pytest
-
import numpy as np
+import pytest
import pandas.util.testing as tm
-from pandas import (TimedeltaIndex, timedelta_range, Int64Index, Float64Index,
- Index, Timedelta, NaT)
+from pandas import (
+ Float64Index, Index, Int64Index, NaT, Timedelta, TimedeltaIndex,
+ timedelta_range
+)
class TestTimedeltaIndex(object):
diff --git a/pandas/tests/indexes/timedeltas/test_construction.py b/pandas/tests/indexes/timedeltas/test_construction.py
index 447e2b40050f6..a5cfad98b31c1 100644
--- a/pandas/tests/indexes/timedeltas/test_construction.py
+++ b/pandas/tests/indexes/timedeltas/test_construction.py
@@ -1,7 +1,7 @@
-import pytest
+from datetime import timedelta
import numpy as np
-from datetime import timedelta
+import pytest
import pandas as pd
import pandas.util.testing as tm
diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py
index 8ba2c81f429d8..e0e932efafd55 100644
--- a/pandas/tests/indexes/timedeltas/test_indexing.py
+++ b/pandas/tests/indexes/timedeltas/test_indexing.py
@@ -1,11 +1,11 @@
from datetime import datetime, timedelta
-import pytest
import numpy as np
+import pytest
import pandas as pd
import pandas.util.testing as tm
-from pandas import TimedeltaIndex, timedelta_range, compat, Index, Timedelta
+from pandas import Index, Timedelta, TimedeltaIndex, compat, timedelta_range
class TestGetItem(object):
diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py
index 4b8c37cceb444..9b3bcbef36805 100644
--- a/pandas/tests/indexes/timedeltas/test_ops.py
+++ b/pandas/tests/indexes/timedeltas/test_ops.py
@@ -1,16 +1,18 @@
-import pytest
+from datetime import timedelta
import numpy as np
-from datetime import timedelta
+import pytest
import pandas as pd
import pandas.util.testing as tm
-from pandas import (Series, Timedelta, Timestamp, TimedeltaIndex,
- timedelta_range, to_timedelta)
+from pandas import (
+ Series, Timedelta, TimedeltaIndex, Timestamp, timedelta_range,
+ to_timedelta
+)
from pandas._libs.tslib import iNaT
+from pandas.core.dtypes.generic import ABCDateOffset
from pandas.tests.test_base import Ops
from pandas.tseries.offsets import Day, Hour
-from pandas.core.dtypes.generic import ABCDateOffset
class TestTimedeltaIndexOps(Ops):
diff --git a/pandas/tests/indexes/timedeltas/test_partial_slicing.py b/pandas/tests/indexes/timedeltas/test_partial_slicing.py
index 7c5f82193da6d..4dfce3dbe23a6 100644
--- a/pandas/tests/indexes/timedeltas/test_partial_slicing.py
+++ b/pandas/tests/indexes/timedeltas/test_partial_slicing.py
@@ -1,10 +1,9 @@
-import pytest
-
import numpy as np
-import pandas.util.testing as tm
+import pytest
import pandas as pd
-from pandas import Series, timedelta_range, Timedelta
+import pandas.util.testing as tm
+from pandas import Series, Timedelta, timedelta_range
from pandas.util.testing import assert_series_equal
diff --git a/pandas/tests/indexes/timedeltas/test_scalar_compat.py b/pandas/tests/indexes/timedeltas/test_scalar_compat.py
index e571ec2ccf20b..b1d8a12943dca 100644
--- a/pandas/tests/indexes/timedeltas/test_scalar_compat.py
+++ b/pandas/tests/indexes/timedeltas/test_scalar_compat.py
@@ -7,7 +7,7 @@
import pandas as pd
import pandas.util.testing as tm
-from pandas import timedelta_range, Timedelta, TimedeltaIndex, Index, Series
+from pandas import Index, Series, Timedelta, TimedeltaIndex, timedelta_range
class TestVectorizedTimedelta(object):
diff --git a/pandas/tests/indexes/timedeltas/test_setops.py b/pandas/tests/indexes/timedeltas/test_setops.py
index 020e9079b3436..35b2cff13c015 100644
--- a/pandas/tests/indexes/timedeltas/test_setops.py
+++ b/pandas/tests/indexes/timedeltas/test_setops.py
@@ -2,7 +2,7 @@
import pandas as pd
import pandas.util.testing as tm
-from pandas import TimedeltaIndex, timedelta_range, Int64Index
+from pandas import Int64Index, TimedeltaIndex, timedelta_range
class TestTimedeltaIndex(object):
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py
index c329d8d15d729..9bc2e93f8468c 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta.py
@@ -1,15 +1,17 @@
-import pytest
+from datetime import timedelta
import numpy as np
-from datetime import timedelta
+import pytest
import pandas as pd
import pandas.util.testing as tm
-from pandas import (timedelta_range, date_range, Series, Timedelta,
- TimedeltaIndex, Index, DataFrame,
- Int64Index)
-from pandas.util.testing import (assert_almost_equal, assert_series_equal,
- assert_index_equal)
+from pandas import (
+ DataFrame, Index, Int64Index, Series, Timedelta, TimedeltaIndex,
+ date_range, timedelta_range
+)
+from pandas.util.testing import (
+ assert_almost_equal, assert_index_equal, assert_series_equal
+)
from ..datetimelike import DatetimeLike
diff --git a/pandas/tests/indexes/timedeltas/test_timedelta_range.py b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
index 1d10e63363cc8..e77c03465d047 100644
--- a/pandas/tests/indexes/timedeltas/test_timedelta_range.py
+++ b/pandas/tests/indexes/timedeltas/test_timedelta_range.py
@@ -1,9 +1,10 @@
-import pytest
import numpy as np
+import pytest
+
import pandas as pd
import pandas.util.testing as tm
+from pandas import timedelta_range, to_timedelta
from pandas.tseries.offsets import Day, Second
-from pandas import to_timedelta, timedelta_range
class TestTimedeltas(object):
diff --git a/pandas/tests/indexes/timedeltas/test_tools.py b/pandas/tests/indexes/timedeltas/test_tools.py
index daa9739132d9e..95a77f1b7fe44 100644
--- a/pandas/tests/indexes/timedeltas/test_tools.py
+++ b/pandas/tests/indexes/timedeltas/test_tools.py
@@ -1,13 +1,13 @@
-import pytest
-
from datetime import time, timedelta
+
import numpy as np
+import pytest
import pandas as pd
import pandas.util.testing as tm
-from pandas.util.testing import assert_series_equal
-from pandas import Series, to_timedelta, isna, TimedeltaIndex
+from pandas import Series, TimedeltaIndex, isna, to_timedelta
from pandas._libs.tslib import iNaT
+from pandas.util.testing import assert_series_equal
class TestTimedeltas(object):
diff --git a/setup.cfg b/setup.cfg
index 83ec8c6a186e1..ad737a1f9e56f 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -214,16 +214,8 @@ skip=
pandas/tests/indexes/datetimes/test_missing.py,
pandas/tests/indexes/multi/test_duplicates.py,
pandas/tests/indexes/multi/test_partial_indexing.py,
- pandas/tests/indexes/multi/test_indexing.py,
- pandas/tests/indexes/multi/test_get_set.py,
- pandas/tests/indexes/multi/test_copy.py,
- pandas/tests/indexes/multi/test_constructor.py,
pandas/tests/indexes/multi/test_names.py,
- pandas/tests/indexes/multi/test_equivalence.py,
pandas/tests/indexes/multi/test_reshape.py,
- pandas/tests/indexes/multi/test_compat.py,
- pandas/tests/indexes/multi/test_contains.py,
- pandas/tests/indexes/multi/test_sorting.py,
pandas/tests/indexes/multi/test_format.py,
pandas/tests/indexes/multi/test_set_ops.py,
pandas/tests/indexes/multi/test_monotonic.py,
| - [x] partial #23334
- [x] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Run `isort -rc pandas/tests/indexes/`
PR is capped at 20 files. So this is not all the files modified from `pandas/tests/indexes/`. So this is Part Two of this directory `pandas/tests/indexes/`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/23339 | 2018-10-25T21:33:08Z | 2018-10-28T02:23:46Z | 2018-10-28T02:23:46Z | 2019-01-02T20:26:22Z |
DOC: Added MultiIndex Example for Series Min | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c1a53e1e97803..63d9b5265cdc7 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9905,16 +9905,16 @@ def _add_numeric_operations(cls):
cls.any = _make_logical_function(
cls, 'any', name, name2, axis_descr, _any_desc, nanops.nanany,
- _any_examples, _any_see_also, empty_value=False)
+ _any_see_also, _any_examples, empty_value=False)
cls.all = _make_logical_function(
cls, 'all', name, name2, axis_descr, _all_desc, nanops.nanall,
- _all_examples, _all_see_also, empty_value=True)
+ _all_see_also, _all_examples, empty_value=True)
@Substitution(outname='mad',
desc="Return the mean absolute deviation of the values "
"for the requested axis.",
name1=name, name2=name2, axis_descr=axis_descr,
- min_count='', examples='')
+ min_count='', see_also='', examples='')
@Appender(_num_doc)
def mad(self, axis=None, skipna=None, level=None):
if skipna is None:
@@ -9956,7 +9956,7 @@ def mad(self, axis=None, skipna=None, level=None):
desc="Return the compound percentage of the values for "
"the requested axis.", name1=name, name2=name2,
axis_descr=axis_descr,
- min_count='', examples='')
+ min_count='', see_also='', examples='')
@Appender(_num_doc)
def compound(self, axis=None, skipna=None, level=None):
if skipna is None:
@@ -9984,8 +9984,9 @@ def compound(self, axis=None, skipna=None, level=None):
cls.sum = _make_min_count_stat_function(
cls, 'sum', name, name2, axis_descr,
- 'Return the sum of the values for the requested axis.',
- nanops.nansum, _sum_examples)
+ """Return the sum of the values for the requested axis.\n
+ This is equivalent to the method ``numpy.sum``.""",
+ nanops.nansum, _stat_func_see_also, _sum_examples)
cls.mean = _make_stat_function(
cls, 'mean', name, name2, axis_descr,
'Return the mean of the values for the requested axis.',
@@ -10004,7 +10005,7 @@ def compound(self, axis=None, skipna=None, level=None):
cls.prod = _make_min_count_stat_function(
cls, 'prod', name, name2, axis_descr,
'Return the product of the values for the requested axis.',
- nanops.nanprod, _prod_examples)
+ nanops.nanprod, examples=_prod_examples)
cls.product = cls.prod
cls.median = _make_stat_function(
cls, 'median', name, name2, axis_descr,
@@ -10012,16 +10013,16 @@ def compound(self, axis=None, skipna=None, level=None):
nanops.nanmedian)
cls.max = _make_stat_function(
cls, 'max', name, name2, axis_descr,
- """This method returns the maximum of the values in the object.
+ """Return the maximum of the values for the requested axis.\n
If you want the *index* of the maximum, use ``idxmax``. This is
the equivalent of the ``numpy.ndarray`` method ``argmax``.""",
- nanops.nanmax, _max_examples)
+ nanops.nanmax, _stat_func_see_also, _max_examples)
cls.min = _make_stat_function(
cls, 'min', name, name2, axis_descr,
- """This method returns the minimum of the values in the object.
+ """Return the minimum of the values for the requested axis.\n
If you want the *index* of the minimum, use ``idxmin``. This is
the equivalent of the ``numpy.ndarray`` method ``argmin``.""",
- nanops.nanmin)
+ nanops.nanmin, _stat_func_see_also, _min_examples)
@classmethod
def _add_series_only_operations(cls):
@@ -10174,21 +10175,25 @@ def _doc_parms(cls):
Parameters
----------
axis : %(axis_descr)s
-skipna : boolean, default True
+ Axis for the function to be applied on.
+skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
- particular level, collapsing into a %(name1)s
-numeric_only : boolean, default None
+ particular level, collapsing into a %(name1)s.
+numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
%(min_count)s\
+**kwargs
+ Additional keyword arguments to be passed to the function.
Returns
-------
-%(outname)s : %(name1)s or %(name2)s (if level specified)\
-
-%(examples)s"""
+%(outname)s : %(name1)s or %(name2)s (if level specified)
+%(see_also)s
+%(examples)s\
+"""
_num_ddof_doc = """
%(desc)s
@@ -10692,43 +10697,49 @@ def _doc_parms(cls):
Series([], dtype: bool)
"""
-_sum_examples = """\
+_shared_docs['stat_func_example'] = """\
Examples
--------
-``MultiIndex`` series example of monthly rainfall
->>> index = pd.MultiIndex.from_product(
-... [['London', 'New York'], ['Jun', 'Jul', 'Aug']],
-... names=['city', 'month'])
->>> s = pd.Series([47, 35, 54, 112, 117, 113], index=index)
+>>> idx = pd.MultiIndex.from_arrays([
+... ['warm', 'warm', 'cold', 'cold'],
+... ['dog', 'falcon', 'fish', 'spider']],
+... names=['blooded', 'animal'])
+>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)
>>> s
-city month
-London Jun 47
- Jul 35
- Aug 54
-New York Jun 112
- Jul 117
- Aug 113
-dtype: int64
-
->>> s.sum()
-478
-
-Sum using level names, as well as indices
-
->>> s.sum(level='city')
-city
-London 136
-New York 342
-dtype: int64
-
->>> s.sum(level=1)
-month
-Jun 159
-Jul 152
-Aug 167
-dtype: int64
+blooded animal
+warm dog 4
+ falcon 2
+cold fish 0
+ spider 8
+Name: legs, dtype: int64
+
+>>> s.{stat_func}()
+{default_output}
+
+{verb} using level names, as well as indices.
+
+>>> s.{stat_func}(level='blooded')
+blooded
+warm {level_output_0}
+cold {level_output_1}
+Name: legs, dtype: int64
+
+>>> s.{stat_func}(level=0)
+blooded
+warm {level_output_0}
+cold {level_output_1}
+Name: legs, dtype: int64
+"""
+
+_sum_examples = _shared_docs['stat_func_example'].format(
+ stat_func='sum',
+ verb='Sum',
+ default_output=14,
+ level_output_0=6,
+ level_output_1=8)
+_sum_examples += """
By default, the sum of an empty or all-NA Series is ``0``.
>>> pd.Series([]).sum() # min_count=0 is the default
@@ -10750,6 +10761,35 @@ def _doc_parms(cls):
nan
"""
+_max_examples = _shared_docs['stat_func_example'].format(
+ stat_func='max',
+ verb='Max',
+ default_output=8,
+ level_output_0=4,
+ level_output_1=8)
+
+_min_examples = _shared_docs['stat_func_example'].format(
+ stat_func='min',
+ verb='Min',
+ default_output=0,
+ level_output_0=2,
+ level_output_1=0)
+
+_stat_func_see_also = """
+See Also
+--------
+Series.sum : Return the sum.
+Series.min : Return the minimum.
+Series.max : Return the maximum.
+Series.idxmin : Return the index of the minimum.
+Series.idxmax : Return the index of the maximum.
+DataFrame.min : Return the sum over the requested axis.
+DataFrame.min : Return the minimum over the requested axis.
+DataFrame.max : Return the maximum over the requested axis.
+DataFrame.idxmin : Return the index of the minimum over the requested axis.
+DataFrame.idxmax : Return the index of the maximum over the requested axis.
+"""
+
_prod_examples = """\
Examples
--------
@@ -10773,45 +10813,6 @@ def _doc_parms(cls):
nan
"""
-_max_examples = """\
-Examples
---------
-``MultiIndex`` series example of monthly rainfall
-
->>> index = pd.MultiIndex.from_product(
-... [['London', 'New York'], ['Jun', 'Jul', 'Aug']],
-... names=['city', 'month'])
->>> s = pd.Series([47, 35, 54, 112, 117, 113], index=index)
->>> s
-city month
-London Jun 47
- Jul 35
- Aug 54
-New York Jun 112
- Jul 117
- Aug 113
-dtype: int64
-
->>> s.max()
-117
-
-Max using level names, as well as indices
-
->>> s.max(level='city')
-city
-London 54
-New York 117
-dtype: int64
-
->>> s.max(level=1)
-month
-Jun 112
-Jul 117
-Aug 113
-dtype: int64
-"""
-
-
_min_count_stub = """\
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
@@ -10826,10 +10827,10 @@ def _doc_parms(cls):
def _make_min_count_stat_function(cls, name, name1, name2, axis_descr, desc,
- f, examples):
+ f, see_also='', examples=''):
@Substitution(outname=name, desc=desc, name1=name1, name2=name2,
axis_descr=axis_descr, min_count=_min_count_stub,
- examples=examples)
+ see_also=see_also, examples=examples)
@Appender(_num_doc)
def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None,
min_count=0,
@@ -10854,9 +10855,10 @@ def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None,
def _make_stat_function(cls, name, name1, name2, axis_descr, desc, f,
- examples=''):
+ see_also='', examples=''):
@Substitution(outname=name, desc=desc, name1=name1, name2=name2,
- axis_descr=axis_descr, min_count='', examples=examples)
+ axis_descr=axis_descr, min_count='', see_also=see_also,
+ examples=examples)
@Appender(_num_doc)
def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
@@ -10933,9 +10935,9 @@ def cum_func(self, axis=None, skipna=True, *args, **kwargs):
def _make_logical_function(cls, name, name1, name2, axis_descr, desc, f,
- examples, see_also, empty_value):
+ see_also, examples, empty_value):
@Substitution(outname=name, desc=desc, name1=name1, name2=name2,
- axis_descr=axis_descr, examples=examples, see_also=see_also,
+ axis_descr=axis_descr, see_also=see_also, examples=examples,
empty_value=empty_value)
@Appender(_bool_doc)
def logical_func(self, axis=0, bool_only=None, skipna=True, level=None,
| Corollary to #23298
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/23338 | 2018-10-25T21:27:04Z | 2018-12-25T00:41:23Z | 2018-12-25T00:41:23Z | 2018-12-31T15:14:47Z |
Partialy fix issue #23334 - isort pandas/core/dtypes directory | diff --git a/pandas/core/dtypes/api.py b/pandas/core/dtypes/api.py
index 7bf3912b05b1d..76021705563bf 100644
--- a/pandas/core/dtypes/api.py
+++ b/pandas/core/dtypes/api.py
@@ -1,59 +1,14 @@
# flake8: noqa
-from .common import (pandas_dtype,
- is_dtype_equal,
- is_extension_type,
-
- # categorical
- is_categorical,
- is_categorical_dtype,
-
- # interval
- is_interval,
- is_interval_dtype,
-
- # datetimelike
- is_datetimetz,
- is_datetime64_dtype,
- is_datetime64tz_dtype,
- is_datetime64_any_dtype,
- is_datetime64_ns_dtype,
- is_timedelta64_dtype,
- is_timedelta64_ns_dtype,
- is_period,
- is_period_dtype,
-
- # string-like
- is_string_dtype,
- is_object_dtype,
-
- # sparse
- is_sparse,
-
- # numeric types
- is_scalar,
- is_sparse,
- is_bool,
- is_integer,
- is_float,
- is_complex,
- is_number,
- is_integer_dtype,
- is_int64_dtype,
- is_numeric_dtype,
- is_float_dtype,
- is_bool_dtype,
- is_complex_dtype,
- is_signed_integer_dtype,
- is_unsigned_integer_dtype,
-
- # like
- is_re,
- is_re_compilable,
- is_dict_like,
- is_iterator,
- is_file_like,
- is_array_like,
- is_list_like,
- is_hashable,
- is_named_tuple)
+from .common import (
+ is_array_like, is_bool, is_bool_dtype, is_categorical,
+ is_categorical_dtype, is_complex, is_complex_dtype,
+ is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype,
+ is_datetime64tz_dtype, is_datetimetz, is_dict_like, is_dtype_equal,
+ is_extension_type, is_file_like, is_float, is_float_dtype, is_hashable,
+ is_int64_dtype, is_integer, is_integer_dtype, is_interval,
+ is_interval_dtype, is_iterator, is_list_like, is_named_tuple, is_number,
+ is_numeric_dtype, is_object_dtype, is_period, is_period_dtype, is_re,
+ is_re_compilable, is_scalar, is_signed_integer_dtype, is_sparse,
+ is_string_dtype, is_timedelta64_dtype, is_timedelta64_ns_dtype,
+ is_unsigned_integer_dtype, pandas_dtype)
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index ac4d6d1590f38..df0e89cced816 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -1,10 +1,12 @@
"""Extend pandas with custom array types"""
import numpy as np
-from pandas import compat
-from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass, ABCDataFrame
from pandas.errors import AbstractMethodError
+from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
+
+from pandas import compat
+
class _DtypeOpsMixin(object):
# Not all of pandas' extension dtypes are compatibile with
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 47e17c9868cd7..c7c6f89eb13a4 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -4,36 +4,25 @@
import numpy as np
-from pandas._libs import tslib, lib, tslibs
-from pandas._libs.tslibs import iNaT, OutOfBoundsDatetime, Period
-from pandas.compat import string_types, text_type, PY3
-from .common import (ensure_object, is_bool, is_integer, is_float,
- is_complex, is_datetimetz, is_categorical_dtype,
- is_datetimelike,
- is_extension_type,
- is_extension_array_dtype,
- is_object_dtype,
- is_datetime64tz_dtype, is_datetime64_dtype,
- is_datetime64_ns_dtype,
- is_timedelta64_dtype, is_timedelta64_ns_dtype,
- is_dtype_equal,
- is_float_dtype, is_complex_dtype,
- is_integer_dtype,
- is_unsigned_integer_dtype,
- is_datetime_or_timedelta_dtype,
- is_bool_dtype, is_scalar,
- is_string_dtype, _string_dtypes,
- pandas_dtype,
- ensure_int8, ensure_int16,
- ensure_int32, ensure_int64,
- _NS_DTYPE, _TD_DTYPE, _INT64_DTYPE,
- _POSSIBLY_CAST_DTYPES)
-from .dtypes import (ExtensionDtype, PandasExtensionDtype, DatetimeTZDtype,
- PeriodDtype)
-from .generic import (ABCDatetimeIndex, ABCPeriodIndex,
- ABCSeries)
-from .missing import isna, notna
+from pandas._libs import lib, tslib, tslibs
+from pandas._libs.tslibs import OutOfBoundsDatetime, Period, iNaT
+from pandas.compat import PY3, string_types, text_type
+
+from .common import (
+ _INT64_DTYPE, _NS_DTYPE, _POSSIBLY_CAST_DTYPES, _TD_DTYPE, _string_dtypes,
+ ensure_int8, ensure_int16, ensure_int32, ensure_int64, ensure_object,
+ is_bool, is_bool_dtype, is_categorical_dtype, is_complex, is_complex_dtype,
+ is_datetime64_dtype, is_datetime64_ns_dtype, is_datetime64tz_dtype,
+ is_datetime_or_timedelta_dtype, is_datetimelike, is_datetimetz,
+ is_dtype_equal, is_extension_array_dtype, is_extension_type, is_float,
+ is_float_dtype, is_integer, is_integer_dtype, is_object_dtype, is_scalar,
+ is_string_dtype, is_timedelta64_dtype, is_timedelta64_ns_dtype,
+ is_unsigned_integer_dtype, pandas_dtype)
+from .dtypes import (
+ DatetimeTZDtype, ExtensionDtype, PandasExtensionDtype, PeriodDtype)
+from .generic import ABCDatetimeIndex, ABCPeriodIndex, ABCSeries
from .inference import is_list_like
+from .missing import isna, notna
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py
index 938392ebfc96d..94e9b72b001b1 100644
--- a/pandas/core/dtypes/common.py
+++ b/pandas/core/dtypes/common.py
@@ -1,25 +1,24 @@
""" common type operations """
import numpy as np
-from pandas.compat import (string_types, text_type, binary_type,
- PY3, PY36)
+
from pandas._libs import algos, lib
-from pandas._libs.tslibs import conversion, Period, Timestamp
from pandas._libs.interval import Interval
+from pandas._libs.tslibs import Period, Timestamp, conversion
+from pandas.compat import PY3, PY36, binary_type, string_types, text_type
from pandas.core.dtypes.dtypes import (
- registry, CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype,
- PeriodDtype, IntervalDtype,
- PandasExtensionDtype, ExtensionDtype,
- _pandas_registry)
+ CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype, ExtensionDtype,
+ IntervalDtype, PandasExtensionDtype, PeriodDtype, _pandas_registry,
+ registry)
from pandas.core.dtypes.generic import (
- ABCCategorical, ABCPeriodIndex, ABCDatetimeIndex, ABCSeries,
- ABCSparseArray, ABCSparseSeries, ABCCategoricalIndex, ABCIndexClass,
- ABCDateOffset, ABCPeriodArray)
+ ABCCategorical, ABCCategoricalIndex, ABCDateOffset, ABCDatetimeIndex,
+ ABCIndexClass, ABCPeriodArray, ABCPeriodIndex, ABCSeries, ABCSparseArray,
+ ABCSparseSeries)
from pandas.core.dtypes.inference import ( # noqa:F401
- is_bool, is_integer, is_float, is_number, is_decimal, is_complex,
- is_re, is_re_compilable, is_dict_like, is_string_like, is_file_like,
- is_list_like, is_nested_list_like, is_sequence, is_named_tuple,
- is_hashable, is_iterator, is_array_like, is_scalar, is_interval)
+ is_array_like, is_bool, is_complex, is_decimal, is_dict_like, is_file_like,
+ is_float, is_hashable, is_integer, is_interval, is_iterator, is_list_like,
+ is_named_tuple, is_nested_list_like, is_number, is_re, is_re_compilable,
+ is_scalar, is_sequence, is_string_like)
_POSSIBLY_CAST_DTYPES = {np.dtype(t).name
for t in ['O', 'int8', 'uint8', 'int16', 'uint16',
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index 702a0246a95dd..b2999c112e8ab 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -3,25 +3,19 @@
"""
import numpy as np
+
from pandas._libs import tslib, tslibs
-from pandas import compat
+
from pandas.core.dtypes.common import (
- is_categorical_dtype,
- is_sparse,
- is_extension_array_dtype,
- is_datetimetz,
- is_datetime64_dtype,
- is_timedelta64_dtype,
- is_period_dtype,
- is_object_dtype,
- is_bool_dtype,
- is_interval_dtype,
- is_dtype_equal,
- _NS_DTYPE,
- _TD_DTYPE)
+ _NS_DTYPE, _TD_DTYPE, is_bool_dtype, is_categorical_dtype,
+ is_datetime64_dtype, is_datetimetz, is_dtype_equal,
+ is_extension_array_dtype, is_interval_dtype, is_object_dtype,
+ is_period_dtype, is_sparse, is_timedelta64_dtype)
from pandas.core.dtypes.generic import (
- ABCDatetimeIndex, ABCTimedeltaIndex,
- ABCPeriodIndex, ABCRangeIndex, ABCSparseDataFrame)
+ ABCDatetimeIndex, ABCPeriodIndex, ABCRangeIndex, ABCSparseDataFrame,
+ ABCTimedeltaIndex)
+
+from pandas import compat
def get_dtype_kinds(l):
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index 961c8f1dbe537..4dfefdec031b2 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -1,11 +1,15 @@
""" define extension dtypes """
import re
+
import numpy as np
-from pandas import compat
-from pandas.core.dtypes.generic import ABCIndexClass, ABCCategoricalIndex
-from pandas._libs.tslibs import Period, NaT, Timestamp
+
from pandas._libs.interval import Interval
+from pandas._libs.tslibs import NaT, Period, Timestamp
+
+from pandas.core.dtypes.generic import ABCCategoricalIndex, ABCIndexClass
+
+from pandas import compat
from .base import ExtensionDtype, _DtypeOpsMixin
diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py
index 7470497383064..d56bd83f01236 100644
--- a/pandas/core/dtypes/inference.py
+++ b/pandas/core/dtypes/inference.py
@@ -1,12 +1,15 @@
""" basic inference routines """
+from numbers import Number
import re
+
import numpy as np
-from numbers import Number
-from pandas import compat
-from pandas.compat import (PY2, string_types, text_type,
- string_and_binary_types, re_type, Set)
+
from pandas._libs import lib
+from pandas.compat import (
+ PY2, Set, re_type, string_and_binary_types, string_types, text_type)
+
+from pandas import compat
is_bool = lib.is_bool
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py
index 1800c32add9b1..a12985bb7d42c 100644
--- a/pandas/core/dtypes/missing.py
+++ b/pandas/core/dtypes/missing.py
@@ -2,27 +2,19 @@
missing types & inference
"""
import numpy as np
+
from pandas._libs import lib, missing as libmissing
from pandas._libs.tslibs import NaT, iNaT
-from .generic import (ABCMultiIndex, ABCSeries,
- ABCIndexClass, ABCGeneric,
- ABCExtensionArray)
-from .common import (is_string_dtype, is_datetimelike,
- is_datetimelike_v_numeric, is_float_dtype,
- is_datetime64_dtype, is_datetime64tz_dtype,
- is_timedelta64_dtype,
- is_period_dtype,
- is_complex_dtype,
- is_string_like_dtype, is_bool_dtype,
- is_integer_dtype, is_dtype_equal,
- is_extension_array_dtype,
- needs_i8_conversion, ensure_object,
- pandas_dtype,
- is_scalar,
- is_object_dtype,
- is_integer,
- _TD_DTYPE,
- _NS_DTYPE)
+
+from .common import (
+ _NS_DTYPE, _TD_DTYPE, ensure_object, is_bool_dtype, is_complex_dtype,
+ is_datetime64_dtype, is_datetime64tz_dtype, is_datetimelike,
+ is_datetimelike_v_numeric, is_dtype_equal, is_extension_array_dtype,
+ is_float_dtype, is_integer, is_integer_dtype, is_object_dtype,
+ is_period_dtype, is_scalar, is_string_dtype, is_string_like_dtype,
+ is_timedelta64_dtype, needs_i8_conversion, pandas_dtype)
+from .generic import (
+ ABCExtensionArray, ABCGeneric, ABCIndexClass, ABCMultiIndex, ABCSeries)
from .inference import is_list_like
isposinf_scalar = libmissing.isposinf_scalar
diff --git a/setup.cfg b/setup.cfg
index 1cfefa1bbaadd..6503f9673c934 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -162,14 +162,6 @@ skip=
pandas/core/tools/numeric.py,
pandas/core/tools/timedeltas.py,
pandas/core/tools/datetimes.py,
- pandas/core/dtypes/concat.py,
- pandas/core/dtypes/cast.py,
- pandas/core/dtypes/api.py,
- pandas/core/dtypes/dtypes.py,
- pandas/core/dtypes/base.py,
- pandas/core/dtypes/common.py,
- pandas/core/dtypes/missing.py,
- pandas/core/dtypes/inference.py,
pandas/core/internals/concat.py,
pandas/core/internals/managers.py,
pandas/core/internals/blocks.py,
| The imports have been sorted with isort in the pandas/core/dtypes directory.
Moving onto sorting the pandas/core/groupby directory.
- [x] partly #23334
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/23336 | 2018-10-25T20:57:52Z | 2018-10-31T13:04:50Z | 2018-10-31T13:04:50Z | 2018-10-31T13:04:54Z |
CLN: isort imports - io dir | diff --git a/pandas/io/api.py b/pandas/io/api.py
index f542a8176dce7..496a00126de87 100644
--- a/pandas/io/api.py
+++ b/pandas/io/api.py
@@ -4,20 +4,21 @@
# flake8: noqa
-from pandas.io.parsers import read_csv, read_table, read_fwf
from pandas.io.clipboards import read_clipboard
from pandas.io.excel import ExcelFile, ExcelWriter, read_excel
-from pandas.io.pytables import HDFStore, get_store, read_hdf
-from pandas.io.json import read_json
-from pandas.io.html import read_html
-from pandas.io.sql import read_sql, read_sql_table, read_sql_query
-from pandas.io.sas import read_sas
from pandas.io.feather_format import read_feather
+from pandas.io.gbq import read_gbq
+from pandas.io.html import read_html
+from pandas.io.json import read_json
+from pandas.io.packers import read_msgpack, to_msgpack
from pandas.io.parquet import read_parquet
-from pandas.io.stata import read_stata
+from pandas.io.parsers import read_csv, read_fwf, read_table
from pandas.io.pickle import read_pickle, to_pickle
-from pandas.io.packers import read_msgpack, to_msgpack
-from pandas.io.gbq import read_gbq
+from pandas.io.pytables import HDFStore, get_store, read_hdf
+from pandas.io.sas import read_sas
+from pandas.io.sql import read_sql, read_sql_query, read_sql_table
+from pandas.io.stata import read_stata
+
# deprecation, xref #13790
def Term(*args, **kwargs):
diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py
index 70c978a3b62ed..287a03c2e5728 100644
--- a/pandas/io/clipboards.py
+++ b/pandas/io/clipboards.py
@@ -1,10 +1,9 @@
""" io on the clipboard """
import warnings
-from pandas.compat import StringIO, PY2, PY3
-
-from pandas.core.dtypes.generic import ABCDataFrame
from pandas import compat, get_option, option_context
+from pandas.compat import PY2, PY3, StringIO
+from pandas.core.dtypes.generic import ABCDataFrame
def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover
diff --git a/pandas/io/common.py b/pandas/io/common.py
index 2056c25ddc5f4..40247c4aebd1e 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -1,21 +1,21 @@
"""Common IO api utilities"""
-import os
-import csv
import codecs
+import csv
import mmap
-from contextlib import contextmanager, closing
+import os
import zipfile
+from contextlib import closing, contextmanager
-from pandas.compat import StringIO, BytesIO, string_types, text_type
-from pandas import compat
-from pandas.io.formats.printing import pprint_thing
import pandas.core.common as com
-from pandas.core.dtypes.common import is_number, is_file_like
-
+from pandas import compat
+from pandas.compat import BytesIO, StringIO, string_types, text_type
+from pandas.core.dtypes.common import is_file_like, is_number
# compat
-from pandas.errors import (ParserError, DtypeWarning, # noqa
- EmptyDataError, ParserWarning)
+from pandas.errors import ( # noqa
+ DtypeWarning, EmptyDataError, ParserError, ParserWarning
+)
+from pandas.io.formats.printing import pprint_thing
# gh-12665: Alias for now and remove later.
CParserError = ParserError
diff --git a/pandas/io/date_converters.py b/pandas/io/date_converters.py
index 377373f8a0135..1a22ee7240d59 100644
--- a/pandas/io/date_converters.py
+++ b/pandas/io/date_converters.py
@@ -1,7 +1,8 @@
"""This module is designed for community supported date conversion functions"""
-from pandas.compat import range, map
import numpy as np
+
from pandas._libs.tslibs import parsing
+from pandas.compat import map, range
def parse_date_time(date_col, time_col):
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index c1cbccb7cbf1c..c6a04b9bdee20 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -2,39 +2,37 @@
Module parse to/from Excel
"""
-# ---------------------------------------------------------------------
-# ExcelFile class
-from datetime import datetime, date, time, MINYEAR, timedelta
-
-import os
import abc
+import os
import warnings
-from textwrap import fill
-from io import UnsupportedOperation
+# ---------------------------------------------------------------------
+# ExcelFile class
+from datetime import MINYEAR, date, datetime, time, timedelta
from distutils.version import LooseVersion
+from io import UnsupportedOperation
+from textwrap import fill
import numpy as np
import pandas._libs.json as json
-from pandas.util._decorators import Appender, deprecate_kwarg
-from pandas.errors import EmptyDataError
-
import pandas.compat as compat
-from pandas.compat import (map, zip, reduce, range, lrange, u, add_metaclass,
- string_types, OrderedDict)
-
-from pandas.core.dtypes.common import (
- is_integer, is_float,
- is_bool, is_list_like)
-
+from pandas.compat import (
+ OrderedDict, add_metaclass, lrange, map, range, reduce, string_types, u,
+ zip
+)
from pandas.core import config
+from pandas.core.dtypes.common import (
+ is_bool, is_float, is_integer, is_list_like
+)
from pandas.core.frame import DataFrame
-
-from pandas.io.parsers import TextParser
-from pandas.io.common import (_is_url, _urlopen, _validate_header_arg,
- get_filepath_or_buffer, _NA_VALUES,
- _stringify_path)
+from pandas.errors import EmptyDataError
+from pandas.io.common import (
+ _NA_VALUES, _is_url, _stringify_path, _urlopen, _validate_header_arg,
+ get_filepath_or_buffer
+)
from pandas.io.formats.printing import pprint_thing
+from pandas.io.parsers import TextParser
+from pandas.util._decorators import Appender, deprecate_kwarg
__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
index 1bc6526214a91..436c16839ffc2 100644
--- a/pandas/io/feather_format.py
+++ b/pandas/io/feather_format.py
@@ -1,7 +1,8 @@
""" feather-format compat """
from distutils.version import LooseVersion
-from pandas import DataFrame, RangeIndex, Int64Index
+
+from pandas import DataFrame, Int64Index, RangeIndex
from pandas.compat import range
from pandas.io.common import _stringify_path
diff --git a/pandas/io/html.py b/pandas/io/html.py
index 04534ff591a2c..a1d3f4589ba35 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -3,22 +3,22 @@
"""
+import numbers
import os
import re
-import numbers
-
from distutils.version import LooseVersion
+import pandas.core.common as com
+from pandas import Series, compat
+from pandas.compat import (
+ binary_type, iteritems, lmap, lrange, raise_with_traceback, string_types,
+ u
+)
from pandas.core.dtypes.common import is_list_like
from pandas.errors import EmptyDataError
-from pandas.io.common import _is_url, urlopen, _validate_header_arg
-from pandas.io.parsers import TextParser
-from pandas import compat
-from pandas.compat import (lrange, lmap, u, string_types, iteritems,
- raise_with_traceback, binary_type)
-from pandas import Series
-import pandas.core.common as com
+from pandas.io.common import _is_url, _validate_header_arg, urlopen
from pandas.io.formats.printing import pprint_thing
+from pandas.io.parsers import TextParser
_IMPORTS = False
_HAS_BS4 = False
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 6ab56c68a510a..aef1d84a19bc7 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -1,10 +1,11 @@
""" parquet compat """
-from warnings import catch_warnings
from distutils.version import LooseVersion
-from pandas import DataFrame, RangeIndex, Int64Index, get_option
-from pandas.compat import string_types
+from warnings import catch_warnings
+
import pandas.core.common as com
+from pandas import DataFrame, Int64Index, RangeIndex, get_option
+from pandas.compat import string_types
from pandas.io.common import get_filepath_or_buffer, is_s3_url
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index eeba30ed8a44f..8dd50fceb4efb 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -2,50 +2,51 @@
Module contains tools for processing files into DataFrames or other objects
"""
from __future__ import print_function
-from collections import defaultdict
-import re
+
import csv
+import datetime
+import re
import sys
import warnings
-import datetime
+from collections import defaultdict
from textwrap import fill
import numpy as np
+import pandas._libs.lib as lib
+import pandas._libs.ops as libops
+import pandas._libs.parsers as parsers
+import pandas.core.common as com
from pandas import compat
-from pandas.compat import (range, lrange, PY3, StringIO, lzip,
- zip, string_types, map, u)
+from pandas._libs.tslibs import parsing
+from pandas.compat import (
+ PY3, StringIO, lrange, lzip, map, range, string_types, u, zip
+)
+from pandas.core import algorithms
+from pandas.core.arrays import Categorical
+from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
- is_integer, ensure_object,
- is_list_like, is_integer_dtype,
- is_float, is_dtype_equal,
- is_object_dtype, is_string_dtype,
- is_scalar, is_categorical_dtype)
+ ensure_object, is_categorical_dtype, is_dtype_equal, is_float, is_integer,
+ is_integer_dtype, is_list_like, is_object_dtype, is_scalar,
+ is_string_dtype
+)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import isna
-from pandas.core.dtypes.cast import astype_nansafe
-from pandas.core.index import (Index, MultiIndex, RangeIndex,
- ensure_index_from_sequences)
-from pandas.core.series import Series
from pandas.core.frame import DataFrame
-from pandas.core.arrays import Categorical
-from pandas.core import algorithms
-import pandas.core.common as com
-from pandas.io.date_converters import generic_parser
-from pandas.errors import ParserWarning, ParserError, EmptyDataError
-from pandas.io.common import (get_filepath_or_buffer, is_file_like,
- _validate_header_arg, _get_handle,
- UnicodeReader, UTF8Recoder, _NA_VALUES,
- BaseIterator, _infer_compression)
+from pandas.core.index import (
+ Index, MultiIndex, RangeIndex, ensure_index_from_sequences
+)
+from pandas.core.series import Series
from pandas.core.tools import datetimes as tools
-
+from pandas.errors import EmptyDataError, ParserError, ParserWarning
+from pandas.io.common import (
+ _NA_VALUES, BaseIterator, UnicodeReader, UTF8Recoder, _get_handle,
+ _infer_compression, _validate_header_arg, get_filepath_or_buffer,
+ is_file_like
+)
+from pandas.io.date_converters import generic_parser
from pandas.util._decorators import Appender
-import pandas._libs.lib as lib
-import pandas._libs.parsers as parsers
-import pandas._libs.ops as libops
-from pandas._libs.tslibs import parsing
-
# BOM character (byte order mark)
# This exists at the beginning of a file to indicate endianness
# of a file (stream). Unfortunately, this marker screws up parsing,
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py
index d52a571da0d61..4e71462f4e331 100644
--- a/pandas/io/pickle.py
+++ b/pandas/io/pickle.py
@@ -3,8 +3,9 @@
import numpy as np
from numpy.lib.format import read_array, write_array
-from pandas.compat import BytesIO, cPickle as pkl, pickle_compat as pc, PY3
-from pandas.core.dtypes.common import is_datetime64_dtype, _NS_DTYPE
+
+from pandas.compat import PY3, BytesIO, cPickle as pkl, pickle_compat as pc
+from pandas.core.dtypes.common import _NS_DTYPE, is_datetime64_dtype
from pandas.io.common import _get_handle, _stringify_path
diff --git a/pandas/io/s3.py b/pandas/io/s3.py
index 7d1360934fd53..4998e4c0400ac 100644
--- a/pandas/io/s3.py
+++ b/pandas/io/s3.py
@@ -1,5 +1,6 @@
""" s3 support for remote file interactivity """
from pandas import compat
+
try:
import s3fs
from botocore.exceptions import NoCredentialsError
diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py
index 385396909a07b..c326580487b09 100644
--- a/pandas/io/sas/sas_xport.py
+++ b/pandas/io/sas/sas_xport.py
@@ -8,17 +8,16 @@
https://support.sas.com/techsup/technote/ts140.pdf
"""
-from datetime import datetime
import struct
import warnings
+from datetime import datetime
import numpy as np
-from pandas.util._decorators import Appender
-from pandas import compat
-
-from pandas.io.common import get_filepath_or_buffer, BaseIterator
import pandas as pd
+from pandas import compat
+from pandas.io.common import BaseIterator, get_filepath_or_buffer
+from pandas.util._decorators import Appender
_correct_line1 = ("HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!"
"000000000000000000000000000000 ")
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 882fa0092b2cf..3876792d6226f 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -4,28 +4,28 @@
retrieval and to reduce dependency on DB-specific API.
"""
-from __future__ import print_function, division
-from datetime import datetime, date, time
+from __future__ import division, print_function
-import warnings
import re
+import warnings
+from contextlib import contextmanager
+from datetime import date, datetime, time
+
import numpy as np
import pandas._libs.lib as lib
-from pandas.core.dtypes.missing import isna
-from pandas.core.dtypes.dtypes import DatetimeTZDtype
-from pandas.core.dtypes.common import (
- is_list_like, is_dict_like,
- is_datetime64tz_dtype)
-
-from pandas.compat import (map, zip, raise_with_traceback,
- string_types, text_type)
+from pandas.compat import (
+ map, raise_with_traceback, string_types, text_type, zip
+)
from pandas.core.api import DataFrame, Series
from pandas.core.base import PandasObject
+from pandas.core.dtypes.common import (
+ is_datetime64tz_dtype, is_dict_like, is_list_like
+)
+from pandas.core.dtypes.dtypes import DatetimeTZDtype
+from pandas.core.dtypes.missing import isna
from pandas.core.tools.datetimes import to_datetime
-from contextlib import contextmanager
-
class SQLAlchemyRequired(ImportError):
pass
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 68b2182c2ff07..00d1a92587930 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -13,27 +13,29 @@
import datetime
import struct
import sys
-from collections import OrderedDict
import warnings
+from collections import OrderedDict
import numpy as np
from dateutil.relativedelta import relativedelta
+from pandas import DatetimeIndex, compat, isna, to_datetime, to_timedelta
from pandas._libs.lib import infer_dtype
from pandas._libs.tslibs import NaT, Timestamp
from pandas._libs.writers import max_len_string_array
-
-from pandas import compat, to_timedelta, to_datetime, isna, DatetimeIndex
-from pandas.compat import (lrange, lmap, lzip, text_type, string_types, range,
- zip, BytesIO)
+from pandas.compat import (
+ BytesIO, lmap, lrange, lzip, range, string_types, text_type, zip
+)
from pandas.core.arrays import Categorical
from pandas.core.base import StringMixin
-from pandas.core.dtypes.common import (is_categorical_dtype, ensure_object,
- is_datetime64_dtype)
+from pandas.core.dtypes.common import (
+ ensure_object, is_categorical_dtype, is_datetime64_dtype
+)
from pandas.core.frame import DataFrame
from pandas.core.series import Series
-from pandas.io.common import (get_filepath_or_buffer, BaseIterator,
- _stringify_path)
+from pandas.io.common import (
+ BaseIterator, _stringify_path, get_filepath_or_buffer
+)
from pandas.util._decorators import Appender, deprecate_kwarg
_version_error = ("Version of given Stata file is not 104, 105, 108, "
diff --git a/setup.cfg b/setup.cfg
index ee39844996025..de3bd356e8f55 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -96,21 +96,6 @@ skip=
pandas/util/_print_versions.py,
pandas/util/_decorators.py,
pandas/util/_test_decorators.py,
- pandas/io/s3.py,
- pandas/io/parquet.py,
- pandas/io/feather_format.py,
- pandas/io/api.py,
- pandas/io/sql.py,
- pandas/io/clipboards.py,
- pandas/io/excel.py,
- pandas/io/date_converters.py,
- pandas/io/testing.py,
- pandas/io/common.py,
- pandas/io/parsers.py,
- pandas/io/html.py,
- pandas/io/pickle.py,
- pandas/io/stata.py,
- pandas/io/sas/sas_xport.py,
pandas/io/sas/sas7bdat.py,
pandas/io/formats/console.py,
pandas/io/formats/excel.py,
| Signed-off-by: alimcmaster1 <alimcmaster1@gmail.com>
Files in `pandas/io/*` ( 1 of 2)
- [x] xref #23334
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/23332 | 2018-10-25T16:49:28Z | 2018-10-25T23:59:30Z | 2018-10-25T23:59:30Z | 2018-10-26T00:00:03Z |
CLN: isort imports- util dir | diff --git a/pandas/_version.py b/pandas/_version.py
index f4c8938c683da..036c927df45d3 100644
--- a/pandas/_version.py
+++ b/pandas/_version.py
@@ -12,6 +12,7 @@
import re
import subprocess
import sys
+
from pandas.compat import PY3
diff --git a/pandas/conftest.py b/pandas/conftest.py
index b8ba347651ae6..6142f188f5613 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -1,15 +1,15 @@
-import os
import importlib
+import os
+import hypothesis
+from hypothesis import strategies as st
+import numpy as np
import pytest
-import pandas
-import numpy as np
-import pandas as pd
from pandas.compat import PY3
import pandas.util._test_decorators as td
-import hypothesis
+import pandas as pd
hypothesis.settings.register_profile(
"ci",
@@ -285,7 +285,7 @@ def deco(*args):
@pytest.fixture
def iris(datapath):
"""The iris dataset as a DataFrame."""
- return pandas.read_csv(datapath('data', 'iris.csv'))
+ return pd.read_csv(datapath('data', 'iris.csv'))
@pytest.fixture(params=['nlargest', 'nsmallest'])
@@ -512,7 +512,6 @@ def mock():
# ----------------------------------------------------------------
# Global setup for tests using Hypothesis
-from hypothesis import strategies as st
# Registering these strategies makes them globally available via st.from_type,
# which is use for offsets in tests/tseries/offsets/test_offsets_properties.py
diff --git a/pandas/testing.py b/pandas/testing.py
index 3baf99957cb33..dbea1ecc7362a 100644
--- a/pandas/testing.py
+++ b/pandas/testing.py
@@ -5,4 +5,4 @@
"""
from pandas.util.testing import (
- assert_frame_equal, assert_series_equal, assert_index_equal)
+ assert_frame_equal, assert_index_equal, assert_series_equal)
diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py
index 82cd44113cb25..638282f322c74 100644
--- a/pandas/util/_decorators.py
+++ b/pandas/util/_decorators.py
@@ -1,9 +1,10 @@
-from pandas.compat import callable, signature, PY2
-from pandas._libs.properties import cache_readonly # noqa
+from functools import WRAPPER_ASSIGNMENTS, update_wrapper, wraps
import inspect
-import warnings
from textwrap import dedent, wrap
-from functools import wraps, update_wrapper, WRAPPER_ASSIGNMENTS
+import warnings
+
+from pandas._libs.properties import cache_readonly # noqa
+from pandas.compat import PY2, callable, signature
def deprecate(name, alternative, version, alt_name=None,
diff --git a/pandas/util/_depr_module.py b/pandas/util/_depr_module.py
index 9c648b76fdad1..2c8feec798c66 100644
--- a/pandas/util/_depr_module.py
+++ b/pandas/util/_depr_module.py
@@ -3,8 +3,8 @@
It is for internal use only and should not be used beyond this purpose.
"""
-import warnings
import importlib
+import warnings
class _DeprecatedModule(object):
diff --git a/pandas/util/_doctools.py b/pandas/util/_doctools.py
index c9e6e27363aed..4aee0a2e5350e 100644
--- a/pandas/util/_doctools.py
+++ b/pandas/util/_doctools.py
@@ -1,7 +1,9 @@
import numpy as np
-import pandas as pd
+
import pandas.compat as compat
+import pandas as pd
+
class TablePlotter(object):
"""
diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py
index 03fc82a3acef5..3016bf04b5258 100644
--- a/pandas/util/_print_versions.py
+++ b/pandas/util/_print_versions.py
@@ -1,11 +1,11 @@
+import codecs
+import importlib
+import locale
import os
import platform
-import sys
import struct
import subprocess
-import codecs
-import locale
-import importlib
+import sys
def get_sys_info():
diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py
index 2fe891346065d..52a6740f119b7 100644
--- a/pandas/util/_test_decorators.py
+++ b/pandas/util/_test_decorators.py
@@ -23,15 +23,17 @@ def test_foo():
For more information, refer to the ``pytest`` documentation on ``skipif``.
"""
-import pytest
-import locale
from distutils.version import LooseVersion
+import locale
-from pandas.compat import (is_platform_windows, is_platform_32bit, PY3,
- import_lzma)
+import pytest
+
+from pandas.compat import (
+ PY3, import_lzma, is_platform_32bit, is_platform_windows)
from pandas.compat.numpy import _np_version_under1p15
-from pandas.core.computation.expressions import (_USE_NUMEXPR,
- _NUMEXPR_INSTALLED)
+
+from pandas.core.computation.expressions import (
+ _NUMEXPR_INSTALLED, _USE_NUMEXPR)
def safe_import(mod_name, min_version=None):
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 8871cac6f6af6..96387349eecd7 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -1,57 +1,44 @@
from __future__ import division
-# pylint: disable-msg=W0402
+from contextlib import contextmanager
+from datetime import datetime
+from functools import wraps
+import locale
+import os
import re
import string
+import subprocess
import sys
import tempfile
-import warnings
-import os
-import subprocess
-import locale
import traceback
+import warnings
-from datetime import datetime
-from functools import wraps
-from contextlib import contextmanager
-
-from numpy.random import randn, rand
import numpy as np
+from numpy.random import rand, randn
-import pandas as pd
-from pandas.core.arrays import (
- ExtensionArray,
- IntervalArray,
- PeriodArray,
-)
-from pandas.core.dtypes.missing import array_equivalent
-from pandas.core.dtypes.common import (
- is_datetimelike_v_numeric,
- is_datetimelike_v_object,
- is_number, is_bool,
- needs_i8_conversion,
- is_categorical_dtype,
- is_interval_dtype,
- is_sequence,
- is_list_like,
- is_extension_array_dtype)
-from pandas.io.formats.printing import pprint_thing
-from pandas.core.algorithms import take_1d
-import pandas.core.common as com
-
+from pandas._libs import testing as _testing
import pandas.compat as compat
from pandas.compat import (
- filter, map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter,
- raise_with_traceback, httplib, StringIO, string_types, PY3, PY2)
+ PY2, PY3, Counter, StringIO, callable, filter, httplib, lmap, lrange, lzip,
+ map, raise_with_traceback, range, string_types, u, unichr, zip)
+
+from pandas.core.dtypes.common import (
+ is_bool, is_categorical_dtype, is_datetimelike_v_numeric,
+ is_datetimelike_v_object, is_extension_array_dtype, is_interval_dtype,
+ is_list_like, is_number, is_sequence, needs_i8_conversion)
+from pandas.core.dtypes.missing import array_equivalent
-from pandas import (bdate_range, CategoricalIndex, Categorical, IntervalIndex,
- DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex,
- Index, MultiIndex,
- Series, DataFrame, Panel)
+import pandas as pd
+from pandas import (
+ Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index,
+ IntervalIndex, MultiIndex, Panel, PeriodIndex, RangeIndex, Series,
+ TimedeltaIndex, bdate_range)
+from pandas.core.algorithms import take_1d
+from pandas.core.arrays import ExtensionArray, IntervalArray, PeriodArray
+import pandas.core.common as com
-from pandas._libs import testing as _testing
from pandas.io.common import urlopen
-
+from pandas.io.formats.printing import pprint_thing
N = 30
K = 4
diff --git a/setup.cfg b/setup.cfg
index 1cfefa1bbaadd..edd3b507cb183 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -89,19 +89,6 @@ force_grid_wrap=0
combine_as_imports=True
force_sort_within_sections=True
skip=
- pandas/lib.py,
- pandas/tslib.py,
- pandas/testing.py,
- pandas/conftest.py,
- pandas/_version.py,
- pandas/parser.py,
- pandas/util/_depr_module.py,
- pandas/util/testing.py,
- pandas/util/_doctools.py,
- pandas/util/decorators.py,
- pandas/util/_print_versions.py,
- pandas/util/_decorators.py,
- pandas/util/_test_decorators.py,
pandas/io/sas/sas7bdat.py,
pandas/io/formats/console.py,
pandas/io/formats/excel.py,
| Signed-off-by: alimcmaster1 <alimcmaster1@gmail.com>
util directory/.py files in pandas directory.
- [x] xref #23334
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/23331 | 2018-10-25T16:46:52Z | 2018-10-30T12:24:16Z | 2018-10-30T12:24:16Z | 2018-10-30T12:26:14Z |
REF: SparseArray imports | diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py
index 72527cfa5d12e..08c961935a990 100644
--- a/pandas/core/arrays/sparse.py
+++ b/pandas/core/arrays/sparse.py
@@ -2,57 +2,47 @@
SparseArray data structure
"""
from __future__ import division
-# pylint: disable=E1101,E1103,W0231
-import re
-import operator
import numbers
-import numpy as np
+import operator
+import re
import warnings
-import pandas as pd
-from pandas.core.base import PandasObject
+import numpy as np
+import pandas._libs.sparse as splib
+import pandas.core.algorithms as algos
+import pandas.core.common as com
+import pandas.io.formats.printing as printing
from pandas import compat
-from pandas.errors import PerformanceWarning
+from pandas._libs import index as libindex, lib
+from pandas._libs.sparse import BlockIndex, IntIndex
+from pandas._libs.tslibs import NaT
from pandas.compat.numpy import function as nv
-
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
-import pandas.core.common as com
+from pandas.core.base import PandasObject
from pandas.core.dtypes.base import ExtensionDtype
+from pandas.core.dtypes.cast import (
+ astype_nansafe, construct_1d_arraylike_from_scalar, find_common_type,
+ infer_dtype_from_scalar, maybe_convert_platform
+)
+from pandas.core.dtypes.common import (
+ is_array_like, is_bool_dtype, is_datetime64_any_dtype, is_dtype_equal,
+ is_integer, is_list_like, is_object_dtype, is_scalar, is_string_dtype,
+ pandas_dtype
+)
from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.generic import (
- ABCSparseSeries, ABCSeries, ABCIndexClass
+ ABCIndexClass, ABCSeries, ABCSparseSeries
)
-from pandas.core.dtypes.common import (
- is_datetime64_any_dtype,
- is_integer,
- is_object_dtype,
- is_array_like,
- pandas_dtype,
- is_bool_dtype,
- is_list_like,
- is_string_dtype,
- is_scalar, is_dtype_equal)
-from pandas.core.dtypes.cast import (
- maybe_convert_platform,
- astype_nansafe, find_common_type, infer_dtype_from_scalar,
- construct_1d_arraylike_from_scalar)
-from pandas.core.dtypes.missing import isna, notna, na_value_for_dtype
+from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna
from pandas.core.missing import interpolate_2d
-
-import pandas._libs.sparse as splib
-from pandas._libs.sparse import BlockIndex, IntIndex
-from pandas._libs import index as libindex
-from pandas._libs import lib
-import pandas.core.algorithms as algos
-import pandas.io.formats.printing as printing
+from pandas.errors import PerformanceWarning
# ----------------------------------------------------------------------------
# Dtype
-
@register_extension_dtype
class SparseDtype(ExtensionDtype):
"""
@@ -620,7 +610,7 @@ def __array__(self, dtype=None, copy=True):
if is_datetime64_any_dtype(self.sp_values.dtype):
# However, we *do* special-case the common case of
# a datetime64 with pandas NaT.
- if fill_value is pd.NaT:
+ if fill_value is NaT:
# Can't put pd.NaT in a datetime64[ns]
fill_value = np.datetime64('NaT')
try:
@@ -710,7 +700,7 @@ def _null_fill_value(self):
def _fill_value_matches(self, fill_value):
if self._null_fill_value:
- return pd.isna(fill_value)
+ return isna(fill_value)
else:
return self.fill_value == fill_value
@@ -855,7 +845,7 @@ def _first_fill_value_loc(self):
return np.searchsorted(diff, 2) + 1
def unique(self):
- uniques = list(pd.unique(self.sp_values))
+ uniques = list(algos.unique(self.sp_values))
fill_loc = self._first_fill_value_loc()
if fill_loc >= 0:
uniques.insert(fill_loc, self.fill_value)
@@ -871,8 +861,8 @@ def factorize(self, na_sentinel=-1):
# ExtensionArray.factorize -> Tuple[EA, EA]
# Given that we have to return a dense array of labels, why bother
# implementing an efficient factorize?
- labels, uniques = pd.factorize(np.asarray(self),
- na_sentinel=na_sentinel)
+ labels, uniques = algos.factorize(np.asarray(self),
+ na_sentinel=na_sentinel)
uniques = SparseArray(uniques, dtype=self.dtype)
return labels, uniques
@@ -889,6 +879,8 @@ def value_counts(self, dropna=True):
-------
counts : Series
"""
+ from pandas import Index, Series
+
keys, counts = algos._value_counts_arraylike(self.sp_values,
dropna=dropna)
fcounts = self.sp_index.ngaps
@@ -897,7 +889,7 @@ def value_counts(self, dropna=True):
pass
else:
if self._null_fill_value:
- mask = pd.isna(keys)
+ mask = isna(keys)
else:
mask = keys == self.fill_value
@@ -907,9 +899,9 @@ def value_counts(self, dropna=True):
keys = np.insert(keys, 0, self.fill_value)
counts = np.insert(counts, 0, fcounts)
- if not isinstance(keys, pd.Index):
- keys = pd.Index(keys)
- result = pd.Series(counts, index=keys)
+ if not isinstance(keys, ABCIndexClass):
+ keys = Index(keys)
+ result = Series(counts, index=keys)
return result
# --------
diff --git a/pandas/core/series.py b/pandas/core/series.py
index d813d8430d9e9..d03a88ea78f6f 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -24,7 +24,7 @@
from pandas.compat.numpy import function as nv
from pandas.core import base, generic
from pandas.core.accessor import CachedAccessor
-from pandas.core.arrays import ExtensionArray, period_array
+from pandas.core.arrays import ExtensionArray, SparseArray, period_array
from pandas.core.arrays.categorical import Categorical, CategoricalAccessor
from pandas.core.arrays.sparse import SparseAccessor
from pandas.core.config import get_option
@@ -1367,7 +1367,6 @@ def to_sparse(self, kind='block', fill_value=None):
"""
# TODO: deprecate
from pandas.core.sparse.series import SparseSeries
- from pandas.core.arrays import SparseArray
values = SparseArray(self, kind=kind, fill_value=fill_value)
return SparseSeries(
diff --git a/setup.cfg b/setup.cfg
index f26eac0d2ae62..a5006d66868f6 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -175,8 +175,6 @@ skip=
pandas/core/reshape/merge.py,
pandas/core/reshape/reshape.py,
pandas/core/reshape/pivot.py,
- pandas/core/sparse/array.py,
- pandas/core/arrays/sparse.py,
pandas/core/sparse/api.py,
pandas/core/sparse/series.py,
pandas/core/sparse/frame.py,
| arrays/sparse.py was doing an `import pandas as pd`, which is prone to cause circular imports. I've eliminated that import and updated the namespaces appropriately (NaT, isna, algos.unique / factorize).
Also updated `setup.cfg` to check the import order in `arrays/sparse.py`. | https://api.github.com/repos/pandas-dev/pandas/pulls/23329 | 2018-10-25T15:33:49Z | 2018-10-26T12:23:40Z | 2018-10-26T12:23:40Z | 2018-10-26T20:48:44Z |
ENH: Implement IntervalIndex.is_overlapping | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 81bb420c47a99..bf86204fe0fef 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -1671,6 +1671,7 @@ IntervalIndex Components
IntervalIndex.length
IntervalIndex.values
IntervalIndex.is_non_overlapping_monotonic
+ IntervalIndex.is_overlapping
IntervalIndex.get_loc
IntervalIndex.get_indexer
IntervalIndex.set_closed
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index 6fd0a224b81b2..8a368f5c3c009 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -294,6 +294,7 @@ Other Enhancements
- :meth:`MultiIndex.to_flat_index` has been added to flatten multiple levels into a single-level :class:`Index` object.
- :meth:`DataFrame.to_stata` and :class:` pandas.io.stata.StataWriter117` can write mixed sting columns to Stata strl format (:issue:`23633`)
- :meth:`DataFrame.between_time` and :meth:`DataFrame.at_time` have gained the an ``axis`` parameter (:issue: `8839`)
+- :class:`IntervalIndex` has gained the :attr:`~IntervalIndex.is_overlapping` attribute to indicate if the ``IntervalIndex`` contains any overlapping intervals (:issue:`23309`)
.. _whatsnew_0240.api_breaking:
diff --git a/pandas/_libs/intervaltree.pxi.in b/pandas/_libs/intervaltree.pxi.in
index 7be3bdbc1048a..fb6f30c030f11 100644
--- a/pandas/_libs/intervaltree.pxi.in
+++ b/pandas/_libs/intervaltree.pxi.in
@@ -26,7 +26,7 @@ cdef class IntervalTree(IntervalMixin):
cdef:
readonly object left, right, root, dtype
readonly str closed
- object _left_sorter, _right_sorter
+ object _is_overlapping, _left_sorter, _right_sorter
def __init__(self, left, right, closed='right', leaf_size=100):
"""
@@ -81,6 +81,26 @@ cdef class IntervalTree(IntervalMixin):
self._right_sorter = np.argsort(self.right)
return self._right_sorter
+ @property
+ def is_overlapping(self):
+ """
+ Determine if the IntervalTree contains overlapping intervals.
+ Cached as self._is_overlapping.
+ """
+ if self._is_overlapping is not None:
+ return self._is_overlapping
+
+ # <= when both sides closed since endpoints can overlap
+ op = le if self.closed == 'both' else lt
+
+ # overlap if start of current interval < end of previous interval
+ # (current and previous in terms of sorted order by left/start side)
+ current = self.left[self.left_sorter[1:]]
+ previous = self.right[self.left_sorter[:-1]]
+ self._is_overlapping = bool(op(current, previous).any())
+
+ return self._is_overlapping
+
def get_loc(self, scalar_t key):
"""Return all positions corresponding to intervals that overlap with
the given scalar key
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 70be850481d85..b055bc3f2eb52 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -74,6 +74,7 @@
length
values
is_non_overlapping_monotonic
+%(extra_attributes)s\
Methods
-------
@@ -107,6 +108,7 @@
summary="Pandas array for interval data that are closed on the same side.",
versionadded="0.24.0",
name='',
+ extra_attributes='',
extra_methods='',
examples=textwrap.dedent("""\
Examples
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 1ebcf213ab0eb..f3e84d5b6c963 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -104,6 +104,7 @@ def _new_IntervalIndex(cls, d):
summary="Immutable index of intervals that are closed on the same side.",
name=_index_doc_kwargs['name'],
versionadded="0.20.0",
+ extra_attributes="is_overlapping\n",
extra_methods="contains\n",
examples=textwrap.dedent("""\
Examples
@@ -464,6 +465,61 @@ def is_unique(self):
def is_non_overlapping_monotonic(self):
return self._data.is_non_overlapping_monotonic
+ @property
+ def is_overlapping(self):
+ """
+ Return True if the IntervalIndex has overlapping intervals, else False.
+
+ Two intervals overlap if they share a common point, including closed
+ endpoints. Intervals that only have an open endpoint in common do not
+ overlap.
+
+ .. versionadded:: 0.24.0
+
+ Returns
+ -------
+ bool
+ Boolean indicating if the IntervalIndex has overlapping intervals.
+
+ Examples
+ --------
+ >>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)])
+ >>> index
+ IntervalIndex([(0, 2], (1, 3], (4, 5]],
+ closed='right',
+ dtype='interval[int64]')
+ >>> index.is_overlapping
+ True
+
+ Intervals that share closed endpoints overlap:
+
+ >>> index = pd.interval_range(0, 3, closed='both')
+ >>> index
+ IntervalIndex([[0, 1], [1, 2], [2, 3]],
+ closed='both',
+ dtype='interval[int64]')
+ >>> index.is_overlapping
+ True
+
+ Intervals that only have an open endpoint in common do not overlap:
+
+ >>> index = pd.interval_range(0, 3, closed='left')
+ >>> index
+ IntervalIndex([[0, 1), [1, 2), [2, 3)],
+ closed='left',
+ dtype='interval[int64]')
+ >>> index.is_overlapping
+ False
+
+ See Also
+ --------
+ Interval.overlaps : Check whether two Interval objects overlap.
+ IntervalIndex.overlaps : Check an IntervalIndex elementwise for
+ overlaps.
+ """
+ # GH 23309
+ return self._engine.is_overlapping
+
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
if kind == 'iloc':
@@ -570,6 +626,10 @@ def _maybe_convert_i8(self, key):
else:
# DatetimeIndex/TimedeltaIndex
key_dtype, key_i8 = key.dtype, Index(key.asi8)
+ if key.hasnans:
+ # convert NaT from it's i8 value to np.nan so it's not viewed
+ # as a valid value, maybe causing errors (e.g. is_overlapping)
+ key_i8 = key_i8.where(~key._isnan)
# ensure consistency with IntervalIndex subtype
subtype = self.dtype.subtype
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index da3b3253ecbd1..c4dac6948cd7a 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -654,6 +654,23 @@ def test_maybe_convert_i8(self, breaks):
expected = Index(breaks.asi8)
tm.assert_index_equal(result, expected)
+ @pytest.mark.parametrize('breaks', [
+ date_range('2018-01-01', periods=5),
+ timedelta_range('0 days', periods=5)])
+ def test_maybe_convert_i8_nat(self, breaks):
+ # GH 20636
+ index = IntervalIndex.from_breaks(breaks)
+
+ to_convert = breaks._constructor([pd.NaT] * 3)
+ expected = pd.Float64Index([np.nan] * 3)
+ result = index._maybe_convert_i8(to_convert)
+ tm.assert_index_equal(result, expected)
+
+ to_convert = to_convert.insert(0, breaks[0])
+ expected = expected.insert(0, float(breaks[0].value))
+ result = index._maybe_convert_i8(to_convert)
+ tm.assert_index_equal(result, expected)
+
@pytest.mark.parametrize('breaks', [
np.arange(5, dtype='int64'),
np.arange(5, dtype='float64')], ids=lambda x: str(x.dtype))
@@ -1082,6 +1099,50 @@ def test_is_non_overlapping_monotonic(self, closed):
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is True
+ @pytest.mark.parametrize('start, shift, na_value', [
+ (0, 1, np.nan),
+ (Timestamp('2018-01-01'), Timedelta('1 day'), pd.NaT),
+ (Timedelta('0 days'), Timedelta('1 day'), pd.NaT)])
+ def test_is_overlapping(self, start, shift, na_value, closed):
+ # GH 23309
+ # see test_interval_tree.py for extensive tests; interface tests here
+
+ # non-overlapping
+ tuples = [(start + n * shift, start + (n + 1) * shift)
+ for n in (0, 2, 4)]
+ index = IntervalIndex.from_tuples(tuples, closed=closed)
+ assert index.is_overlapping is False
+
+ # non-overlapping with NA
+ tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)]
+ index = IntervalIndex.from_tuples(tuples, closed=closed)
+ assert index.is_overlapping is False
+
+ # overlapping
+ tuples = [(start + n * shift, start + (n + 2) * shift)
+ for n in range(3)]
+ index = IntervalIndex.from_tuples(tuples, closed=closed)
+ assert index.is_overlapping is True
+
+ # overlapping with NA
+ tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)]
+ index = IntervalIndex.from_tuples(tuples, closed=closed)
+ assert index.is_overlapping is True
+
+ # common endpoints
+ tuples = [(start + n * shift, start + (n + 1) * shift)
+ for n in range(3)]
+ index = IntervalIndex.from_tuples(tuples, closed=closed)
+ result = index.is_overlapping
+ expected = closed == 'both'
+ assert result is expected
+
+ # common endpoints with NA
+ tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)]
+ index = IntervalIndex.from_tuples(tuples, closed=closed)
+ result = index.is_overlapping
+ assert result is expected
+
@pytest.mark.parametrize('tuples', [
lzip(range(10), range(1, 11)),
lzip(date_range('20170101', periods=10),
diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py
index 686cdaccd3883..90255835d9147 100644
--- a/pandas/tests/indexes/interval/test_interval_tree.py
+++ b/pandas/tests/indexes/interval/test_interval_tree.py
@@ -1,5 +1,7 @@
from __future__ import division
+from itertools import permutations
+
import numpy as np
import pytest
@@ -135,3 +137,36 @@ def test_get_indexer_closed(self, closed, leaf_size):
expected = found if tree.closed_right else not_found
tm.assert_numpy_array_equal(expected, tree.get_indexer(x + 0.5))
+
+ @pytest.mark.parametrize('left, right, expected', [
+ (np.array([0, 1, 4]), np.array([2, 3, 5]), True),
+ (np.array([0, 1, 2]), np.array([5, 4, 3]), True),
+ (np.array([0, 1, np.nan]), np.array([5, 4, np.nan]), True),
+ (np.array([0, 2, 4]), np.array([1, 3, 5]), False),
+ (np.array([0, 2, np.nan]), np.array([1, 3, np.nan]), False)])
+ @pytest.mark.parametrize('order', map(list, permutations(range(3))))
+ def test_is_overlapping(self, closed, order, left, right, expected):
+ # GH 23309
+ tree = IntervalTree(left[order], right[order], closed=closed)
+ result = tree.is_overlapping
+ assert result is expected
+
+ @pytest.mark.parametrize('order', map(list, permutations(range(3))))
+ def test_is_overlapping_endpoints(self, closed, order):
+ """shared endpoints are marked as overlapping"""
+ # GH 23309
+ left, right = np.arange(3), np.arange(1, 4)
+ tree = IntervalTree(left[order], right[order], closed=closed)
+ result = tree.is_overlapping
+ expected = closed is 'both'
+ assert result is expected
+
+ @pytest.mark.parametrize('left, right', [
+ (np.array([], dtype='int64'), np.array([], dtype='int64')),
+ (np.array([0], dtype='int64'), np.array([1], dtype='int64')),
+ (np.array([np.nan]), np.array([np.nan])),
+ (np.array([np.nan] * 3), np.array([np.nan] * 3))])
+ def test_is_overlapping_trivial(self, closed, left, right):
+ # GH 23309
+ tree = IntervalTree(left, right, closed=closed)
+ assert tree.is_overlapping is False
| - [X] closes #23309
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
This is needed in the `get_indexer` implementation for the new `IntervalIndex` behavior, as an overlapping `IntervalIndex` may return non-unique indices for a given query; seems cleaner to implement separately. Also makes sense as a general attribute for an `IntervalIndex` to have. | https://api.github.com/repos/pandas-dev/pandas/pulls/23327 | 2018-10-25T07:18:13Z | 2018-11-29T17:22:45Z | 2018-11-29T17:22:45Z | 2018-11-29T17:26:27Z |
TST: Update sparse data generation | diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py
index ca0435141c2e2..6946da6fdc36d 100644
--- a/pandas/tests/extension/test_sparse.py
+++ b/pandas/tests/extension/test_sparse.py
@@ -12,7 +12,7 @@ def make_data(fill_value):
if np.isnan(fill_value):
data = np.random.uniform(size=100)
else:
- data = np.random.randint(0, 100, size=100)
+ data = np.random.randint(1, 100, size=100)
data[2::3] = fill_value
return data
| There's a spurious failure on master when the first
is randomly chosen to be 0, since type(arr.fill_value) doesn't
match arr.dtype.type
https://github.com/pandas-dev/pandas/issues/23124 will fix the underlying issue, but this works around it so we don't have tests failing randomly.
Closes https://github.com/pandas-dev/pandas/issues/23168 | https://api.github.com/repos/pandas-dev/pandas/pulls/23323 | 2018-10-24T19:42:20Z | 2018-10-25T11:06:32Z | 2018-10-25T11:06:32Z | 2018-10-25T11:06:36Z |
CI: pin CPython to 3.6.6 | diff --git a/ci/azure-windows-36.yaml b/ci/azure-windows-36.yaml
index 656a6a31d92b4..d03a6cbbd662c 100644
--- a/ci/azure-windows-36.yaml
+++ b/ci/azure-windows-36.yaml
@@ -16,7 +16,7 @@ dependencies:
- pyarrow
- pytables
- python-dateutil
- - python=3.6.*
+ - python=3.6.6
- pytz
- scipy
- thrift=0.10*
| ref #23319 | https://api.github.com/repos/pandas-dev/pandas/pulls/23322 | 2018-10-24T18:20:38Z | 2018-10-24T18:50:02Z | 2018-10-24T18:50:02Z | 2018-10-25T01:08:42Z |
Support for partition_cols in to_parquet | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 68faefa872c88..13828200f61cd 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -4668,6 +4668,43 @@ Passing ``index=True`` will *always* write the index, even if that's not the
underlying engine's default behavior.
+Partitioning Parquet files
+''''''''''''''''''''''''''
+
+.. versionadded:: 0.24.0
+
+Parquet supports partitioning of data based on the values of one or more columns.
+
+.. ipython:: python
+
+ df = pd.DataFrame({'a': [0, 0, 1, 1], 'b': [0, 1, 0, 1]})
+ df.to_parquet(fname='test', engine='pyarrow', partition_cols=['a'], compression=None)
+
+The `fname` specifies the parent directory to which data will be saved.
+The `partition_cols` are the column names by which the dataset will be partitioned.
+Columns are partitioned in the order they are given. The partition splits are
+determined by the unique values in the partition columns.
+The above example creates a partitioned dataset that may look like:
+
+.. code-block:: text
+
+ test
+ ├── a=0
+ │ ├── 0bac803e32dc42ae83fddfd029cbdebc.parquet
+ │ └── ...
+ └── a=1
+ ├── e6ab24a4f45147b49b54a662f0c412a3.parquet
+ └── ...
+
+.. ipython:: python
+ :suppress:
+
+ from shutil import rmtree
+ try:
+ rmtree('test')
+ except Exception:
+ pass
+
.. _io.sql:
SQL Queries
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 695c4a4e16c9d..efb850418f0aa 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -235,6 +235,7 @@ Other Enhancements
- New attribute :attr:`__git_version__` will return git commit sha of current build (:issue:`21295`).
- Compatibility with Matplotlib 3.0 (:issue:`22790`).
- Added :meth:`Interval.overlaps`, :meth:`IntervalArray.overlaps`, and :meth:`IntervalIndex.overlaps` for determining overlaps between interval-like objects (:issue:`21998`)
+- :func:`~DataFrame.to_parquet` now supports writing a ``DataFrame`` as a directory of parquet files partitioned by a subset of the columns when ``engine = 'pyarrow'`` (:issue:`23283`)
- :meth:`Timestamp.tz_localize`, :meth:`DatetimeIndex.tz_localize`, and :meth:`Series.tz_localize` have gained the ``nonexistent`` argument for alternative handling of nonexistent times. See :ref:`timeseries.timezone_nonexsistent` (:issue:`8917`)
.. _whatsnew_0240.api_breaking:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 7aadf7e735f38..8f96eb73aeb74 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1970,7 +1970,7 @@ def to_feather(self, fname):
to_feather(self, fname)
def to_parquet(self, fname, engine='auto', compression='snappy',
- index=None, **kwargs):
+ index=None, partition_cols=None, **kwargs):
"""
Write a DataFrame to the binary parquet format.
@@ -1984,7 +1984,11 @@ def to_parquet(self, fname, engine='auto', compression='snappy',
Parameters
----------
fname : str
- String file path.
+ File path or Root Directory path. Will be used as Root Directory
+ path while writing a partitioned dataset.
+
+ .. versionchanged:: 0.24.0
+
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
@@ -1999,6 +2003,12 @@ def to_parquet(self, fname, engine='auto', compression='snappy',
.. versionadded:: 0.24.0
+ partition_cols : list, optional, default None
+ Column names by which to partition the dataset
+ Columns are partitioned in the order they are given
+
+ .. versionadded:: 0.24.0
+
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
@@ -2027,7 +2037,8 @@ def to_parquet(self, fname, engine='auto', compression='snappy',
"""
from pandas.io.parquet import to_parquet
to_parquet(self, fname, engine,
- compression=compression, index=index, **kwargs)
+ compression=compression, index=index,
+ partition_cols=partition_cols, **kwargs)
@Substitution(header='Write out the column names. If a list of strings '
'is given, it is assumed to be aliases for the '
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 160a26533fb89..3d72b1ec3a47f 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -101,7 +101,8 @@ def __init__(self):
self.api = pyarrow
def write(self, df, path, compression='snappy',
- coerce_timestamps='ms', index=None, **kwargs):
+ coerce_timestamps='ms', index=None, partition_cols=None,
+ **kwargs):
self.validate_dataframe(df)
path, _, _, _ = get_filepath_or_buffer(path, mode='wb')
@@ -109,11 +110,16 @@ def write(self, df, path, compression='snappy',
from_pandas_kwargs = {}
else:
from_pandas_kwargs = {'preserve_index': index}
-
table = self.api.Table.from_pandas(df, **from_pandas_kwargs)
- self.api.parquet.write_table(
- table, path, compression=compression,
- coerce_timestamps=coerce_timestamps, **kwargs)
+ if partition_cols is not None:
+ self.api.parquet.write_to_dataset(
+ table, path, compression=compression,
+ coerce_timestamps=coerce_timestamps,
+ partition_cols=partition_cols, **kwargs)
+ else:
+ self.api.parquet.write_table(
+ table, path, compression=compression,
+ coerce_timestamps=coerce_timestamps, **kwargs)
def read(self, path, columns=None, **kwargs):
path, _, _, should_close = get_filepath_or_buffer(path)
@@ -156,12 +162,23 @@ def __init__(self):
)
self.api = fastparquet
- def write(self, df, path, compression='snappy', index=None, **kwargs):
+ def write(self, df, path, compression='snappy', index=None,
+ partition_cols=None, **kwargs):
self.validate_dataframe(df)
# thriftpy/protocol/compact.py:339:
# DeprecationWarning: tostring() is deprecated.
# Use tobytes() instead.
+ if 'partition_on' in kwargs and partition_cols is not None:
+ raise ValueError("Cannot use both partition_on and "
+ "partition_cols. Use partition_cols for "
+ "partitioning data")
+ elif 'partition_on' in kwargs:
+ partition_cols = kwargs.pop('partition_on')
+
+ if partition_cols is not None:
+ kwargs['file_scheme'] = 'hive'
+
if is_s3_url(path):
# path is s3:// so we need to open the s3file in 'wb' mode.
# TODO: Support 'ab'
@@ -174,7 +191,8 @@ def write(self, df, path, compression='snappy', index=None, **kwargs):
with catch_warnings(record=True):
self.api.write(path, df, compression=compression,
- write_index=index, **kwargs)
+ write_index=index, partition_on=partition_cols,
+ **kwargs)
def read(self, path, columns=None, **kwargs):
if is_s3_url(path):
@@ -194,15 +212,18 @@ def read(self, path, columns=None, **kwargs):
def to_parquet(df, path, engine='auto', compression='snappy', index=None,
- **kwargs):
+ partition_cols=None, **kwargs):
"""
Write a DataFrame to the parquet format.
Parameters
----------
- df : DataFrame
- path : string
- File path
+ path : str
+ File path or Root Directory path. Will be used as Root Directory path
+ while writing a partitioned dataset.
+
+ .. versionchanged:: 0.24.0
+
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
@@ -216,11 +237,19 @@ def to_parquet(df, path, engine='auto', compression='snappy', index=None,
engine's default behavior will be used.
.. versionadded 0.24.0
+
+ partition_cols : list, optional, default None
+ Column names by which to partition the dataset
+ Columns are partitioned in the order they are given
+
+ .. versionadded:: 0.24.0
+
kwargs
Additional keyword arguments passed to the engine
"""
impl = get_engine(engine)
- return impl.write(df, path, compression=compression, index=index, **kwargs)
+ return impl.write(df, path, compression=compression, index=index,
+ partition_cols=partition_cols, **kwargs)
def read_parquet(path, engine='auto', columns=None, **kwargs):
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 3b3e7f757bf60..6024fccb15c76 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -1,4 +1,5 @@
""" test parquet compat """
+import os
import pytest
import datetime
@@ -454,6 +455,18 @@ def test_s3_roundtrip(self, df_compat, s3_resource, pa):
check_round_trip(df_compat, pa,
path='s3://pandas-test/pyarrow.parquet')
+ def test_partition_cols_supported(self, pa, df_full):
+ # GH #23283
+ partition_cols = ['bool', 'int']
+ df = df_full
+ with tm.ensure_clean_dir() as path:
+ df.to_parquet(path, partition_cols=partition_cols,
+ compression=None)
+ import pyarrow.parquet as pq
+ dataset = pq.ParquetDataset(path, validate_schema=False)
+ assert len(dataset.partitions.partition_names) == 2
+ assert dataset.partitions.partition_names == set(partition_cols)
+
class TestParquetFastParquet(Base):
@@ -519,3 +532,37 @@ def test_s3_roundtrip(self, df_compat, s3_resource, fp):
# GH #19134
check_round_trip(df_compat, fp,
path='s3://pandas-test/fastparquet.parquet')
+
+ def test_partition_cols_supported(self, fp, df_full):
+ # GH #23283
+ partition_cols = ['bool', 'int']
+ df = df_full
+ with tm.ensure_clean_dir() as path:
+ df.to_parquet(path, engine="fastparquet",
+ partition_cols=partition_cols, compression=None)
+ assert os.path.exists(path)
+ import fastparquet
+ actual_partition_cols = fastparquet.ParquetFile(path, False).cats
+ assert len(actual_partition_cols) == 2
+
+ def test_partition_on_supported(self, fp, df_full):
+ # GH #23283
+ partition_cols = ['bool', 'int']
+ df = df_full
+ with tm.ensure_clean_dir() as path:
+ df.to_parquet(path, engine="fastparquet", compression=None,
+ partition_on=partition_cols)
+ assert os.path.exists(path)
+ import fastparquet
+ actual_partition_cols = fastparquet.ParquetFile(path, False).cats
+ assert len(actual_partition_cols) == 2
+
+ def test_error_on_using_partition_cols_and_partition_on(self, fp, df_full):
+ # GH #23283
+ partition_cols = ['bool', 'int']
+ df = df_full
+ with pytest.raises(ValueError):
+ with tm.ensure_clean_dir() as path:
+ df.to_parquet(path, engine="fastparquet", compression=None,
+ partition_on=partition_cols,
+ partition_cols=partition_cols)
diff --git a/pandas/tests/util/test_testing.py b/pandas/tests/util/test_testing.py
index d1dc91f94e3c4..c10ad72d39f8e 100644
--- a/pandas/tests/util/test_testing.py
+++ b/pandas/tests/util/test_testing.py
@@ -876,3 +876,10 @@ def test_datapath_missing(datapath, request):
)
assert result == expected
+
+
+def test_create_temp_directory():
+ with tm.ensure_clean_dir() as path:
+ assert os.path.exists(path)
+ assert os.path.isdir(path)
+ assert not os.path.exists(path)
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 96387349eecd7..f0dcfda2f52ad 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -6,6 +6,7 @@
import locale
import os
import re
+from shutil import rmtree
import string
import subprocess
import sys
@@ -759,6 +760,25 @@ def ensure_clean(filename=None, return_filelike=False):
print("Exception on removing file: {error}".format(error=e))
+@contextmanager
+def ensure_clean_dir():
+ """
+ Get a temporary directory path and agrees to remove on close.
+
+ Yields
+ ------
+ Temporary directory path
+ """
+ directory_name = tempfile.mkdtemp(suffix='')
+ try:
+ yield directory_name
+ finally:
+ try:
+ rmtree(directory_name)
+ except Exception:
+ pass
+
+
# -----------------------------------------------------------------------------
# Comparators
| - [x] closes [#23283](https://github.com/pandas-dev/pandas/issues/23283)
- [x] tests passed
- [x] passes git diff upstream/master -u -- "*.py" | flake8 --diff
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/23321 | 2018-10-24T18:17:23Z | 2018-11-10T12:11:53Z | 2018-11-10T12:11:53Z | 2018-11-10T12:11:54Z |
BUG/TST: timedelta-like with Index/Series/DataFrame ops | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index a58d34574d28d..f3fd924ee7e6e 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4944,7 +4944,7 @@ def _combine_match_columns(self, other, func, level=None):
assert left.columns.equals(right.index)
return ops.dispatch_to_series(left, right, func, axis="columns")
- def _combine_const(self, other, func, errors='raise'):
+ def _combine_const(self, other, func):
assert lib.is_scalar(other) or np.ndim(other) == 0
return ops.dispatch_to_series(self, other, func)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index a9edad1fa2e01..1ffdac1989129 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -4702,6 +4702,13 @@ def dropna(self, how='any'):
def _evaluate_with_timedelta_like(self, other, op):
# Timedelta knows how to operate with np.array, so dispatch to that
# operation and then wrap the results
+ if self._is_numeric_dtype and op.__name__ in ['add', 'sub',
+ 'radd', 'rsub']:
+ raise TypeError("Operation {opname} between {cls} and {other} "
+ "is invalid".format(opname=op.__name__,
+ cls=type(self).__name__,
+ other=type(other).__name__))
+
other = Timedelta(other)
values = self.values
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index cce5fda7dba28..673ab9f2118a4 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -14,7 +14,8 @@
from pandas.core.dtypes import concat as _concat
from pandas.core.dtypes.common import (
is_int64_dtype, is_integer, is_scalar, is_timedelta64_dtype)
-from pandas.core.dtypes.generic import ABCSeries, ABCTimedeltaIndex
+from pandas.core.dtypes.generic import (
+ ABCDataFrame, ABCSeries, ABCTimedeltaIndex)
from pandas.core import ops
import pandas.core.common as com
@@ -558,6 +559,9 @@ def __getitem__(self, key):
return super_getitem(key)
def __floordiv__(self, other):
+ if isinstance(other, (ABCSeries, ABCDataFrame)):
+ return NotImplemented
+
if is_integer(other) and other != 0:
if (len(self) == 0 or
self._start % other == 0 and
@@ -589,7 +593,7 @@ def _make_evaluate_binop(op, step=False):
"""
def _evaluate_numeric_binop(self, other):
- if isinstance(other, ABCSeries):
+ if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to TimedeltaIndex implementation
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 2335b26c576eb..fbfdfb9c01237 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -130,6 +130,13 @@ def maybe_upcast_for_op(obj):
# implementation; otherwise operation against numeric-dtype
# raises TypeError
return pd.Timedelta(obj)
+ elif isinstance(obj, np.timedelta64) and not isna(obj):
+ # In particular non-nanosecond timedelta64 needs to be cast to
+ # nanoseconds, or else we get undesired behavior like
+ # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D')
+ # The isna check is to avoid casting timedelta64("NaT"), which would
+ # return NaT and incorrectly be treated as a datetime-NaT.
+ return pd.Timedelta(obj)
elif isinstance(obj, np.ndarray) and is_timedelta64_dtype(obj):
# GH#22390 Unfortunately we need to special-case right-hand
# timedelta64 dtypes because numpy casts integer dtypes to
@@ -1405,11 +1412,12 @@ def wrapper(left, right):
index=left.index, name=res_name,
dtype=result.dtype)
- elif is_timedelta64_dtype(right) and not is_scalar(right):
- # i.e. exclude np.timedelta64 object
+ elif is_timedelta64_dtype(right):
+ # We should only get here with non-scalar or timedelta64('NaT')
+ # values for right
# Note: we cannot use dispatch_to_index_op because
- # that may incorrectly raise TypeError when we
- # should get NullFrequencyError
+ # that may incorrectly raise TypeError when we
+ # should get NullFrequencyError
result = op(pd.Index(left), right)
return construct_result(left, result,
index=left.index, name=res_name,
@@ -1941,8 +1949,7 @@ def f(self, other):
# straight boolean comparisons we want to allow all columns
# (regardless of dtype to pass thru) See #4537 for discussion.
- res = self._combine_const(other, func,
- errors='ignore')
+ res = self._combine_const(other, func)
return res.fillna(True).astype(bool)
f.__name__ = op_name
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py
index c7d8be0d2e9e4..ee7de49bc1bce 100644
--- a/pandas/core/sparse/frame.py
+++ b/pandas/core/sparse/frame.py
@@ -620,7 +620,7 @@ def _combine_match_columns(self, other, func, level=None):
new_data, index=left.index, columns=left.columns,
default_fill_value=self.default_fill_value).__finalize__(self)
- def _combine_const(self, other, func, errors='raise'):
+ def _combine_const(self, other, func):
return self._apply_columns(lambda x: func(x, other))
def _get_op_result_fill_value(self, other, func):
diff --git a/pandas/tests/arithmetic/conftest.py b/pandas/tests/arithmetic/conftest.py
index b800b66e8edea..cbe26a06d34c6 100644
--- a/pandas/tests/arithmetic/conftest.py
+++ b/pandas/tests/arithmetic/conftest.py
@@ -70,7 +70,8 @@ def scalar_td(request):
pd.Timedelta(days=3).to_pytimedelta(),
pd.Timedelta('72:00:00'),
np.timedelta64(3, 'D'),
- np.timedelta64(72, 'h')])
+ np.timedelta64(72, 'h')],
+ ids=lambda x: type(x).__name__)
def three_days(request):
"""
Several timedelta-like and DateOffset objects that each represent
@@ -84,7 +85,8 @@ def three_days(request):
pd.Timedelta(hours=2).to_pytimedelta(),
pd.Timedelta(seconds=2 * 3600),
np.timedelta64(2, 'h'),
- np.timedelta64(120, 'm')])
+ np.timedelta64(120, 'm')],
+ ids=lambda x: type(x).__name__)
def two_hours(request):
"""
Several timedelta-like and DateOffset objects that each represent
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index 25845dd8b3151..9163f2e1a3d1c 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -148,15 +148,11 @@ def test_numeric_arr_mul_tdscalar(self, scalar_td, numeric_idx, box):
tm.assert_equal(commute, expected)
def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box):
- index = numeric_idx[1:3]
- broken = (isinstance(three_days, np.timedelta64) and
- three_days.dtype != 'm8[ns]')
- broken = broken or isinstance(three_days, pd.offsets.Tick)
- if box is not pd.Index and broken:
- # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D')
- raise pytest.xfail("timedelta64 not converted to nanos; "
- "Tick division not implemented")
+ if box is not pd.Index and isinstance(three_days, pd.offsets.Tick):
+ raise pytest.xfail("Tick division not implemented")
+
+ index = numeric_idx[1:3]
expected = TimedeltaIndex(['3 Days', '36 Hours'])
@@ -169,6 +165,26 @@ def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box):
with pytest.raises(TypeError):
index / three_days
+ @pytest.mark.parametrize('other', [
+ pd.Timedelta(hours=31),
+ pd.Timedelta(hours=31).to_pytimedelta(),
+ pd.Timedelta(hours=31).to_timedelta64(),
+ pd.Timedelta(hours=31).to_timedelta64().astype('m8[h]'),
+ np.timedelta64('NaT'),
+ np.timedelta64('NaT', 'D'),
+ pd.offsets.Minute(3),
+ pd.offsets.Second(0)])
+ def test_add_sub_timedeltalike_invalid(self, numeric_idx, other, box):
+ left = tm.box_expected(numeric_idx, box)
+ with pytest.raises(TypeError):
+ left + other
+ with pytest.raises(TypeError):
+ other + left
+ with pytest.raises(TypeError):
+ left - other
+ with pytest.raises(TypeError):
+ other - left
+
# ------------------------------------------------------------------
# Arithmetic
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index 9930297fd1a3c..d1ea51a46889f 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -1051,10 +1051,8 @@ def test_tdi_mul_float_series(self, box_df_fail):
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
- def test_tdi_rmul_arraylike(self, other, box_df_fail):
- # RangeIndex fails to return NotImplemented, for others
- # DataFrame tries to broadcast incorrectly
- box = box_df_fail
+ def test_tdi_rmul_arraylike(self, other, box_df_broadcast_failure):
+ box = box_df_broadcast_failure
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py
index ecda48822eb0f..efea9b58ecb7a 100644
--- a/pandas/tests/indexes/test_range.py
+++ b/pandas/tests/indexes/test_range.py
@@ -185,6 +185,25 @@ def test_constructor_name(self):
assert copy.name == 'copy'
assert new.name == 'new'
+ # TODO: mod, divmod?
+ @pytest.mark.parametrize('op', [operator.add, operator.sub,
+ operator.mul, operator.floordiv,
+ operator.truediv, operator.pow])
+ def test_arithmetic_with_frame_or_series(self, op):
+ # check that we return NotImplemented when operating with Series
+ # or DataFrame
+ index = pd.RangeIndex(5)
+ other = pd.Series(np.random.randn(5))
+
+ expected = op(pd.Series(index), other)
+ result = op(index, other)
+ tm.assert_series_equal(result, expected)
+
+ other = pd.DataFrame(np.random.randn(2, 5))
+ expected = op(pd.DataFrame([index, index]), other)
+ result = op(index, other)
+ tm.assert_frame_equal(result, expected)
+
def test_numeric_compat2(self):
# validate that we are handling the RangeIndex overrides to numeric ops
# and returning RangeIndex where possible
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index cdf35ea96588a..b327b158adc24 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -1243,7 +1243,6 @@ def test_binop_other(self, op, value, dtype):
(operator.mul, '<M8[ns]'),
(operator.add, '<M8[ns]'),
(operator.pow, '<m8[ns]'),
- (operator.mod, '<m8[ns]'),
(operator.mul, '<m8[ns]')}
if (op, dtype) in invalid:
| Broken off from #23308, with additional fixes for problems found along the way.
```
val = np.timedelta64(4*10**9, 'ns')
ser = pd.Series([val]) * 1.5
>>> ser % val # <-- raises TypeError in master
0 00:00:02
dtype: timedelta64[ns]
```
```
ri = pd.RangeIndex(1)
di = pd.DataFrame(ri)
>>> ri + df # <-- raises TypeError in master
0
0 0
```
```
td64 = np.timedelta64(4*10**9, 'ns')
tdnat = np.timedelta64("NaT", "D")
ser = pd.Series([0, 1, 2])
>>> ser + td64 # <-- fails to raise in master, now raises TypeError; ditto with tdnat
# ditto if for radd, sub, and rsub
>>> pd.Index(ser) + tdnat # <-- in master returns all-NaT DatetimeIndex, now raises
# ditto for sub; reversed ops DO raise TypeError in master, but for wrong reason
>>> pd.DataFrame(ser) + tdnat # <-- in msater returns all-NaT timedelta64 DataFrame, now raises
# ditto for radd, sub, and rsub
```
```
off = pd.offsets.Minute()
ser = pd.Series([1, 2, 3])
>>> off / ser # <-- raises TypeError in master
0 00:01:00
1 00:00:30
2 00:00:20
dtype: timedelta64[ns]
```
One more that isn't a bug in master, but where the new behavior is better:
```
off = pd.offsets.Minute()
ser = pd.Series([1, 2, 3])
# master
>>> ser * off
0 <Minute>
1 <2 * Minutes>
2 <3 * Minutes>
dtype: object
# PR
>>> ser * off
0 00:01:00
1 00:02:00
2 00:03:00
dtype: timedelta64[ns]
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/23320 | 2018-10-24T17:49:35Z | 2018-10-30T16:18:52Z | 2018-10-30T16:18:52Z | 2018-10-30T16:56:33Z |
BUG: GroupBy return EA dtype | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index fc7019c486d9a..62069dfcb2262 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -850,6 +850,7 @@ update the ``ExtensionDtype._metadata`` tuple to match the signature of your
- :func:`ExtensionArray.isna` is allowed to return an ``ExtensionArray`` (:issue:`22325`).
- Support for reduction operations such as ``sum``, ``mean`` via opt-in base class method override (:issue:`22762`)
- :meth:`Series.unstack` no longer converts extension arrays to object-dtype ndarrays. The output ``DataFrame`` will now have the same dtype as the input. This changes behavior for Categorical and Sparse data (:issue:`23077`).
+- Bug when grouping :meth:`Dataframe.groupby()` and aggregating on ``ExtensionArray`` it was not returning the actual ``ExtensionArray`` dtype (:issue:`23227`).
.. _whatsnew_0240.api.incompatibilities:
@@ -1084,6 +1085,7 @@ Categorical
- Bug when indexing with a boolean-valued ``Categorical``. Now a boolean-valued ``Categorical`` is treated as a boolean mask (:issue:`22665`)
- Constructing a :class:`CategoricalIndex` with empty values and boolean categories was raising a ``ValueError`` after a change to dtype coercion (:issue:`22702`).
- Bug in :meth:`Categorical.take` with a user-provided ``fill_value`` not encoding the ``fill_value``, which could result in a ``ValueError``, incorrect results, or a segmentation fault (:issue:`23296`).
+- Bug when resampling :meth:`Dataframe.resample()` and aggregating on categorical data, the categorical dtype was getting lost. (:issue:`23227`)
Datetimelike
^^^^^^^^^^^^
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index e31929434b5d6..ea7507799fa9a 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -24,7 +24,8 @@ class providing the base-class of operations.
from pandas.util._validators import validate_kwargs
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
-from pandas.core.dtypes.common import ensure_float, is_numeric_dtype, is_scalar
+from pandas.core.dtypes.common import (
+ ensure_float, is_extension_array_dtype, is_numeric_dtype, is_scalar)
from pandas.core.dtypes.missing import isna, notna
import pandas.core.algorithms as algorithms
@@ -754,7 +755,18 @@ def _try_cast(self, result, obj, numeric_only=False):
dtype = obj.dtype
if not is_scalar(result):
- if numeric_only and is_numeric_dtype(dtype) or not numeric_only:
+ if is_extension_array_dtype(dtype):
+ # The function can return something of any type, so check
+ # if the type is compatible with the calling EA.
+ try:
+ result = obj.values._from_sequence(result)
+ except Exception:
+ # https://github.com/pandas-dev/pandas/issues/22850
+ # pandas has no control over what 3rd-party ExtensionArrays
+ # do in _values_from_sequence. We still want ops to work
+ # though, so we catch any regular Exception.
+ pass
+ elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
return result
diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py
index 41ec2d3026499..24bc8ffe2e5a5 100644
--- a/pandas/tests/arrays/test_integer.py
+++ b/pandas/tests/arrays/test_integer.py
@@ -650,9 +650,10 @@ def test_preserve_dtypes(op):
# groupby
result = getattr(df.groupby("A"), op)()
+
expected = pd.DataFrame({
"B": np.array([1.0, 3.0]),
- "C": np.array([1, 3], dtype="int64")
+ "C": integer_array([1, 3], dtype="Int64")
}, index=pd.Index(['a', 'b'], name='A'))
tm.assert_frame_equal(result, expected)
@@ -673,9 +674,10 @@ def test_reduce_to_float(op):
# groupby
result = getattr(df.groupby("A"), op)()
+
expected = pd.DataFrame({
"B": np.array([1.0, 3.0]),
- "C": np.array([1, 3], dtype="float64")
+ "C": integer_array([1, 3], dtype="Int64")
}, index=pd.Index(['a', 'b'], name='A'))
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/sparse/test_groupby.py b/pandas/tests/sparse/test_groupby.py
index 1d2129312fb1b..d0ff2a02c4046 100644
--- a/pandas/tests/sparse/test_groupby.py
+++ b/pandas/tests/sparse/test_groupby.py
@@ -24,27 +24,39 @@ def test_first_last_nth(self):
sparse_grouped = self.sparse.groupby('A')
dense_grouped = self.dense.groupby('A')
+ sparse_grouped_first = sparse_grouped.first()
+ sparse_grouped_last = sparse_grouped.last()
+ sparse_grouped_nth = sparse_grouped.nth(1)
+
+ dense_grouped_first = dense_grouped.first().to_sparse()
+ dense_grouped_last = dense_grouped.last().to_sparse()
+ dense_grouped_nth = dense_grouped.nth(1).to_sparse()
+
# TODO: shouldn't these all be spares or not?
- tm.assert_frame_equal(sparse_grouped.first(),
- dense_grouped.first())
- tm.assert_frame_equal(sparse_grouped.last(),
- dense_grouped.last())
- tm.assert_frame_equal(sparse_grouped.nth(1),
- dense_grouped.nth(1).to_sparse())
+ tm.assert_frame_equal(sparse_grouped_first,
+ dense_grouped_first)
+ tm.assert_frame_equal(sparse_grouped_last,
+ dense_grouped_last)
+ tm.assert_frame_equal(sparse_grouped_nth,
+ dense_grouped_nth)
def test_aggfuncs(self):
sparse_grouped = self.sparse.groupby('A')
dense_grouped = self.dense.groupby('A')
- tm.assert_frame_equal(sparse_grouped.mean(),
- dense_grouped.mean())
+ result = sparse_grouped.mean().to_sparse()
+ expected = dense_grouped.mean().to_sparse()
+
+ tm.assert_frame_equal(result, expected)
# ToDo: sparse sum includes str column
# tm.assert_frame_equal(sparse_grouped.sum(),
# dense_grouped.sum())
- tm.assert_frame_equal(sparse_grouped.count(),
- dense_grouped.count())
+ result = sparse_grouped.count().to_sparse()
+ expected = dense_grouped.count().to_sparse()
+
+ tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("fill_value", [0, np.nan])
@@ -54,6 +66,5 @@ def test_groupby_includes_fill_value(fill_value):
'b': [fill_value, 1, fill_value, fill_value]})
sdf = df.to_sparse(fill_value=fill_value)
result = sdf.groupby('a').sum()
- expected = df.groupby('a').sum()
- tm.assert_frame_equal(result, expected,
- check_index_type=False)
+ expected = df.groupby('a').sum().to_sparse(fill_value=fill_value)
+ tm.assert_frame_equal(result, expected, check_index_type=False)
diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py
index 69a0613c95475..ed29e20fd5ca5 100644
--- a/pandas/tests/test_resample.py
+++ b/pandas/tests/test_resample.py
@@ -1576,6 +1576,7 @@ def test_resample_categorical_data_with_timedeltaindex(self):
'Group': ['A', 'A']},
index=pd.to_timedelta([0, 10], unit='s'))
expected = expected.reindex(['Group_obj', 'Group'], axis=1)
+ expected['Group'] = expected['Group_obj'].astype('category')
tm.assert_frame_equal(result, expected)
def test_resample_daily_anchored(self):
| - [x] closes #23227
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/23318 | 2018-10-24T17:13:19Z | 2018-11-06T14:48:45Z | 2018-11-06T14:48:45Z | 2018-11-06T14:48:50Z |
BUG: Handle Datetimelike data in DataFrame.combine | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 4f17133ef4a8c..bc0198bb8960a 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -1001,6 +1001,7 @@ Datetimelike
- Bug in :func:`to_datetime` with an :class:`Index` argument that would drop the ``name`` from the result (:issue:`21697`)
- Bug in :class:`PeriodIndex` where adding or subtracting a :class:`timedelta` or :class:`Tick` object produced incorrect results (:issue:`22988`)
- Bug in :func:`date_range` when decrementing a start date to a past end date by a negative frequency (:issue:`23270`)
+- Bug in :func:`DataFrame.combine` with datetimelike values raising a TypeError (:issue:`23079`)
Timedelta
^^^^^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 6dd9174028f18..61721ce4c82e7 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5141,22 +5141,14 @@ def combine(self, other, func, fill_value=None, overwrite=True):
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
- # see if we need to be represented as i8 (datetimelike)
- # try to keep us at this dtype
- needs_i8_conversion_i = needs_i8_conversion(new_dtype)
- if needs_i8_conversion_i:
- arr = func(series, otherSeries, True)
- else:
- arr = func(series, otherSeries)
-
+ arr = func(series, otherSeries)
arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index,
- columns=new_columns)._convert(datetime=True,
- copy=False)
+ columns=new_columns)
def combine_first(self, other):
"""
@@ -5203,15 +5195,28 @@ def combine_first(self, other):
"""
import pandas.core.computation.expressions as expressions
- def combiner(x, y, needs_i8_conversion=False):
- x_values = x.values if hasattr(x, 'values') else x
- y_values = y.values if hasattr(y, 'values') else y
- if needs_i8_conversion:
- mask = isna(x)
- x_values = x_values.view('i8')
- y_values = y_values.view('i8')
- else:
- mask = isna(x_values)
+ def extract_values(arr):
+ # Does two things:
+ # 1. maybe gets the values from the Series / Index
+ # 2. convert datelike to i8
+ if isinstance(arr, (ABCIndexClass, ABCSeries)):
+ arr = arr._values
+
+ if needs_i8_conversion(arr):
+ # TODO(DatetimelikeArray): just use .asi8
+ if is_extension_array_dtype(arr.dtype):
+ arr = arr.asi8
+ else:
+ arr = arr.view('i8')
+ return arr
+
+ def combiner(x, y):
+ mask = isna(x)
+ if isinstance(mask, (ABCIndexClass, ABCSeries)):
+ mask = mask._values
+
+ x_values = extract_values(x)
+ y_values = extract_values(y)
# If the column y in other DataFrame is not in first DataFrame,
# just return y_values.
diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py
index ece9559313ba0..2c0fbe9d585cb 100644
--- a/pandas/tests/frame/test_combine_concat.py
+++ b/pandas/tests/frame/test_combine_concat.py
@@ -31,6 +31,24 @@ def test_concat_multiple_frames_dtypes(self):
expected = Series(dict(float64=2, float32=2))
assert_series_equal(results, expected)
+ @pytest.mark.parametrize('data', [
+ pd.date_range('2000', periods=4),
+ pd.date_range('2000', periods=4, tz="US/Central"),
+ pd.period_range('2000', periods=4),
+ pd.timedelta_range(0, periods=4),
+ ])
+ def test_combine_datetlike_udf(self, data):
+ # https://github.com/pandas-dev/pandas/issues/23079
+ df = pd.DataFrame({"A": data})
+ other = df.copy()
+ df.iloc[1, 0] = None
+
+ def combiner(a, b):
+ return b
+
+ result = df.combine(other, combiner)
+ tm.assert_frame_equal(result, other)
+
def test_concat_multiple_tzs(self):
# GH 12467
# combining datetime tz-aware and naive DataFrames
| Closes https://github.com/pandas-dev/pandas/issues/23079 | https://api.github.com/repos/pandas-dev/pandas/pulls/23317 | 2018-10-24T16:29:46Z | 2018-10-26T00:21:24Z | 2018-10-26T00:21:23Z | 2018-10-26T00:21:30Z |
Switched references of App veyor to azure pipelines in the contributing CI section | diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index f898ef54e4101..67b8d287d5d1a 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -657,12 +657,12 @@ Testing With Continuous Integration
-----------------------------------
The *pandas* test suite will run automatically on `Travis-CI <https://travis-ci.org/>`__,
-`Appveyor <https://www.appveyor.com/>`__, and `Circle CI <https://circleci.com/>`__ continuous integration
-services, once your pull request is submitted.
+`Azure Pipelines <https://azure.microsoft.com/en-us/services/devops/pipelines/>`__,
+and `Circle CI <https://circleci.com/>`__ continuous integration services, once your pull request is submitted.
However, if you wish to run the test suite on a branch prior to submitting the pull request,
then the continuous integration services need to be hooked to your GitHub repository. Instructions are here
for `Travis-CI <http://about.travis-ci.org/docs/user/getting-started/>`__,
-`Appveyor <https://www.appveyor.com/docs/>`__ , and `CircleCI <https://circleci.com/>`__.
+`Azure Pipelines <https://docs.microsoft.com/en-us/azure/devops/pipelines/>`__, and `CircleCI <https://circleci.com/>`__.
A pull-request will be considered for merging when you have an all 'green' build. If any tests are failing,
then you will get a red 'X', where you can click through to see the individual failed tests.
@@ -672,8 +672,8 @@ This is an example of a green build.
.. note::
- Each time you push to *your* fork, a *new* run of the tests will be triggered on the CI. Appveyor will auto-cancel
- any non-currently-running tests for that same pull-request. You can enable the auto-cancel feature for
+ Each time you push to *your* fork, a *new* run of the tests will be triggered on the CI.
+ You can enable the auto-cancel feature, which removes any non-currently-running tests for that same pull-request, for
`Travis-CI here <https://docs.travis-ci.com/user/customizing-the-build/#Building-only-the-latest-commit>`__ and
for `CircleCI here <https://circleci.com/changelog-legacy/#option-to-auto-cancel-redundant-builds>`__.
| - Related to #22829 #22760 #22690
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` | https://api.github.com/repos/pandas-dev/pandas/pulls/23311 | 2018-10-24T09:08:00Z | 2018-10-26T00:14:02Z | 2018-10-26T00:14:02Z | 2018-10-26T09:47:30Z |
BUG-23224 Fix PR 23237 / Integer NA creation from None | diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 17e92c3976e2c..9e045a7785660 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -173,7 +173,7 @@ def coerce_to_array(values, dtype, mask=None, copy=False):
values = np.array(values, copy=copy)
if is_object_dtype(values):
inferred_type = lib.infer_dtype(values)
- if inferred_type is 'mixed' and isna(values).any():
+ if inferred_type is 'mixed' and isna(values).all():
values = np.empty(len(values))
values.fill(np.nan)
elif inferred_type not in ['floating', 'integer',
diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py
index e6dae0ffaec28..10961173d4b6b 100644
--- a/pandas/tests/arrays/test_integer.py
+++ b/pandas/tests/arrays/test_integer.py
@@ -560,7 +560,8 @@ def test_integer_array_constructor_copy():
1.0,
pd.date_range('20130101', periods=2),
np.array(['foo']),
- [[1, 2], [3, 4]]])
+ [[1, 2], [3, 4]],
+ [np.nan, {'a': 1}]])
def test_to_integer_array_error(values):
# error in converting existing arrays to IntegerArrays
with pytest.raises(TypeError):
| Followup PR to fix an oversight in PR #23237 as discussed in its thread | https://api.github.com/repos/pandas-dev/pandas/pulls/23310 | 2018-10-24T03:32:57Z | 2018-10-25T11:26:14Z | 2018-10-25T11:26:14Z | 2018-10-25T11:26:17Z |
TST: Use __tracebackhide__ to suppress unnecessary parts of tm assertions | diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index b5ec0912c5c26..a20f75b74c115 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -480,8 +480,8 @@ def get_locales(prefix=None, normalize=True,
if prefix is None:
return _valid_locales(out_locales, normalize)
- found = re.compile('{prefix}.*'.format(prefix=prefix)) \
- .findall('\n'.join(out_locales))
+ pattern = re.compile('{prefix}.*'.format(prefix=prefix))
+ found = pattern.findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
@@ -895,6 +895,7 @@ def _get_ilevel_values(index, level):
def assert_class_equal(left, right, exact=True, obj='Input'):
"""checks classes are equal."""
+ __tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
@@ -934,6 +935,7 @@ def assert_attr_equal(attr, left, right, obj='Attributes'):
Specify object name being compared, internally used to show appropriate
assertion message
"""
+ __tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
@@ -964,14 +966,14 @@ def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
- msg = ('one of \'objs\' is not a matplotlib Axes instance, type '
- 'encountered {name!r}').format(name=el.__class__.__name__)
+ msg = ("one of 'objs' is not a matplotlib Axes instance, type "
+ "encountered {name!r}").format(name=el.__class__.__name__)
assert isinstance(el, (plt.Axes, dict)), msg
else:
- assert isinstance(objs, (plt.Artist, tuple, dict)), \
- ('objs is neither an ndarray of Artist instances nor a '
- 'single Artist instance, tuple, or dict, "objs" is a {name!r}'
- ).format(name=objs.__class__.__name__)
+ assert isinstance(objs, (plt.Artist, tuple, dict)), (
+ 'objs is neither an ndarray of Artist instances nor a '
+ 'single Artist instance, tuple, or dict, "objs" is a {name!r}'
+ .format(name=objs.__class__.__name__))
def isiterable(obj):
@@ -1102,6 +1104,7 @@ def assert_numpy_array_equal(left, right, strict_nan=False,
Specify object name being compared, internally used to show appropriate
assertion message
"""
+ __tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
@@ -1222,6 +1225,7 @@ def assert_series_equal(left, right, check_dtype=True,
Specify object name being compared, internally used to show appropriate
assertion message
"""
+ __tracebackhide__ = True
# instance validation
_check_isinstance(left, right, Series)
@@ -1395,6 +1399,7 @@ def assert_frame_equal(left, right, check_dtype=True,
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
+ __tracebackhide__ = True
# instance validation
_check_isinstance(left, right, DataFrame)
@@ -1530,6 +1535,8 @@ def assert_equal(left, right, **kwargs):
right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
**kwargs
"""
+ __tracebackhide__ = True
+
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
elif isinstance(left, pd.Series):
@@ -2017,8 +2024,9 @@ def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
assert (is_sequence(ndupe_l) and len(ndupe_l) <= nlevels)
assert (names is None or names is False or
names is True or len(names) is nlevels)
- assert idx_type is None or \
- (idx_type in ('i', 'f', 's', 'u', 'dt', 'p', 'td') and nlevels == 1)
+ assert idx_type is None or (idx_type in ('i', 'f', 's', 'u',
+ 'dt', 'p', 'td')
+ and nlevels == 1)
if names is True:
# build default names
@@ -2145,12 +2153,12 @@ def makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True,
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
- assert r_idx_type is None or \
- (r_idx_type in ('i', 'f', 's',
- 'u', 'dt', 'p', 'td') and r_idx_nlevels == 1)
- assert c_idx_type is None or \
- (c_idx_type in ('i', 'f', 's',
- 'u', 'dt', 'p', 'td') and c_idx_nlevels == 1)
+ assert r_idx_type is None or (r_idx_type in ('i', 'f', 's',
+ 'u', 'dt', 'p', 'td')
+ and r_idx_nlevels == 1)
+ assert c_idx_type is None or (c_idx_type in ('i', 'f', 's',
+ 'u', 'dt', 'p', 'td')
+ and c_idx_nlevels == 1)
columns = makeCustomIndex(ncols, nlevels=c_idx_nlevels, prefix='C',
names=c_idx_names, ndupe_l=c_ndupe_l,
@@ -2483,7 +2491,7 @@ def wrapper(*args, **kwargs):
def assert_raises_regex(_exception, _regexp, _callable=None,
*args, **kwargs):
- """
+ r"""
Check that the specified Exception is raised and that the error message
matches a given regular expression pattern. This may be a regular
expression object or a string containing a regular expression suitable
@@ -2665,6 +2673,8 @@ class for all warnings. To check that no warning is returned,
..warn:: This is *not* thread-safe.
"""
+ __tracebackhide__ = True
+
with warnings.catch_warnings(record=True) as w:
if clear is not None:
| Makes test output much more to the point.
Also gets rid of some backslashes, both for prettiness and to get rid of deprecation warnings in test logs. | https://api.github.com/repos/pandas-dev/pandas/pulls/23307 | 2018-10-24T01:01:29Z | 2018-10-24T11:45:46Z | 2018-10-24T11:45:46Z | 2018-10-24T16:09:49Z |
Add base test for extensionarray setitem #23300 | diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py
index 307543eca2b3e..6ebe3cb7487ab 100644
--- a/pandas/tests/extension/base/setitem.py
+++ b/pandas/tests/extension/base/setitem.py
@@ -9,18 +9,20 @@
class BaseSetitemTests(BaseExtensionTests):
- def test_setitem_scalar_series(self, data):
- arr = pd.Series(data)
- arr[0] = data[1]
- assert arr[0] == data[1]
-
- def test_setitem_sequence(self, data):
- arr = pd.Series(data)
+ def test_setitem_scalar_series(self, data, box_in_series):
+ if box_in_series:
+ data = pd.Series(data)
+ data[0] = data[1]
+ assert data[0] == data[1]
+
+ def test_setitem_sequence(self, data, box_in_series):
+ if box_in_series:
+ data = pd.Series(data)
original = data.copy()
- arr[[0, 1]] = [data[1], data[0]]
- assert arr[0] == original[1]
- assert arr[1] == original[0]
+ data[[0, 1]] = [data[1], data[0]]
+ assert data[0] == original[1]
+ assert data[1] == original[0]
@pytest.mark.parametrize('as_array', [True, False])
def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
@@ -32,22 +34,25 @@ def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
xpr = 'cannot set using a {} indexer with a different length'
with tm.assert_raises_regex(ValueError, xpr.format('list-like')):
ser[[0, 1]] = value
+ assert ser._values[[0, 1]] == value
with tm.assert_raises_regex(ValueError, xpr.format('slice')):
ser[slice(3)] = value
+ assert ser._values[slice(3)] == value
- def test_setitem_empty_indxer(self, data):
- ser = pd.Series(data)
- original = ser.copy()
- ser[[]] = []
- self.assert_series_equal(ser, original)
-
- def test_setitem_sequence_broadcasts(self, data):
- arr = pd.Series(data)
-
- arr[[0, 1]] = data[2]
- assert arr[0] == data[2]
- assert arr[1] == data[2]
+ def test_setitem_empty_indxer(self, data, box_in_series):
+ if box_in_series:
+ data = pd.Series(data)
+ original = data.copy()
+ data[np.array([], dtype=int)] = []
+ self.assert_equal(data, original)
+
+ def test_setitem_sequence_broadcasts(self, data, box_in_series):
+ if box_in_series:
+ data = pd.Series(data)
+ data[[0, 1]] = data[2]
+ assert data[0] == data[2]
+ assert data[1] == data[2]
@pytest.mark.parametrize('setter', ['loc', 'iloc'])
def test_setitem_scalar(self, data, setter):
diff --git a/pandas/tests/extension/conftest.py b/pandas/tests/extension/conftest.py
index 8e397d228a5b6..7758bd01840ae 100644
--- a/pandas/tests/extension/conftest.py
+++ b/pandas/tests/extension/conftest.py
@@ -98,3 +98,9 @@ def data_for_grouping():
Where A < B < C and NA is missing
"""
raise NotImplementedError
+
+
+@pytest.fixture(params=[True, False])
+def box_in_series(request):
+ """Whether to box the data in a Series"""
+ return request.param
| - [x] closes #23300
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/23304 | 2018-10-23T19:17:42Z | 2018-10-26T01:38:18Z | 2018-10-26T01:38:18Z | 2018-10-26T12:20:36Z |
TST: re-enable gbq tests | diff --git a/pandas/tests/io/test_gbq.py b/pandas/tests/io/test_gbq.py
index bc604e066a3e8..345af7c8f056a 100644
--- a/pandas/tests/io/test_gbq.py
+++ b/pandas/tests/io/test_gbq.py
@@ -16,6 +16,9 @@
import pandas.util.testing as tm
+api_exceptions = pytest.importorskip("google.api_core.exceptions")
+bigquery = pytest.importorskip("google.cloud.bigquery")
+service_account = pytest.importorskip("google.oauth2.service_account")
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
@@ -67,20 +70,16 @@ def _get_private_key_path():
return private_key_path
-def clean_gbq_environment(private_key=None):
- dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
- private_key=private_key)
+def _get_client():
+ project_id = _get_project_id()
+ credentials = None
- for i in range(1, 10):
- if DATASET_ID + str(i) in dataset.datasets():
- dataset_id = DATASET_ID + str(i)
- table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
- private_key=private_key)
- for j in range(1, 20):
- if TABLE_ID + str(j) in dataset.tables(dataset_id):
- table.delete(TABLE_ID + str(j))
+ private_key_path = _get_private_key_path()
+ if private_key_path:
+ credentials = service_account.Credentials.from_service_account_file(
+ private_key_path)
- dataset.delete(dataset_id)
+ return bigquery.Client(project=project_id, credentials=credentials)
def make_mixed_dataframe_v2(test_size):
@@ -109,7 +108,6 @@ def test_read_gbq_without_dialect_warns_future_change(monkeypatch):
pd.read_gbq("SELECT 1")
-@pytest.mark.xfail(reason="failing for pandas-gbq >= 0.7.0")
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@@ -122,18 +120,22 @@ def setup_class(cls):
_skip_if_no_project_id()
_skip_if_no_private_key_path()
- clean_gbq_environment(_get_private_key_path())
- pandas_gbq.gbq._Dataset(_get_project_id(),
- private_key=_get_private_key_path()
- ).create(DATASET_ID + "1")
+ cls.client = _get_client()
+ cls.dataset = cls.client.dataset(DATASET_ID + "1")
+ try:
+ # Clean-up previous test runs.
+ cls.client.delete_dataset(cls.dataset, delete_contents=True)
+ except api_exceptions.NotFound:
+ pass # It's OK if the dataset doesn't already exist.
+
+ cls.client.create_dataset(bigquery.Dataset(cls.dataset))
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
-
- clean_gbq_environment(_get_private_key_path())
+ cls.client.delete_dataset(cls.dataset, delete_contents=True)
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
@@ -147,5 +149,6 @@ def test_roundtrip(self):
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
- private_key=_get_private_key_path())
+ private_key=_get_private_key_path(),
+ dialect="standard")
assert result['num_rows'][0] == test_size
| Fix clean-up steps to use the google.cloud.bigquery client
library directly, since some of the _Dataset and _Table
logic was removed from pandas-gbq.
Follow-up to #23281
- [x] tests added / passed
```
$ pytest pandas/tests/io/test_gbq.py
====== test session starts ======
platform darwin -- Python 3.6.4, pytest-3.6.2, py-1.5.3, pluggy-0.6.0
hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('/Users/swast/src/pandas/pandas/.hypothesis/examples')
rootdir: /Users/swast/src/pandas/pandas, inifile: setup.cfg
plugins: xdist-1.22.2, forked-0.2, cov-2.5.1, hypothesis-3.70.3
collected 2 items
pandas/tests/io/test_gbq.py .. [100%]
====== slowest 10 test durations ======
12.55s call pandas/tests/io/test_gbq.py::TestToGBQIntegrationWithServiceAccountKeyPath::test_roundtrip
1.30s setup pandas/tests/io/test_gbq.py::TestToGBQIntegrationWithServiceAccountKeyPath::test_roundtrip
0.66s teardown pandas/tests/io/test_gbq.py::TestToGBQIntegrationWithServiceAccountKeyPath::test_roundtrip
0.18s call pandas/tests/io/test_gbq.py::test_read_gbq_without_dialect_warns_future_change
0.00s setup pandas/tests/io/test_gbq.py::test_read_gbq_without_dialect_warns_future_change
0.00s teardown pandas/tests/io/test_gbq.py::test_read_gbq_without_dialect_warns_future_change
====== 2 passed in 15.00 seconds ======
```
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/23303 | 2018-10-23T18:10:24Z | 2018-10-26T00:18:57Z | 2018-10-26T00:18:57Z | 2018-10-26T00:34:55Z |
fix and test incorrect case in delta_to_nanoseconds | diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index 684344ceb9002..1af9cd619c5f9 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -40,7 +40,7 @@ def get_time_micros(ndarray[int64_t] dtindex):
return micros
-def build_field_sarray(ndarray[int64_t] dtindex):
+def build_field_sarray(int64_t[:] dtindex):
"""
Datetime as int64 representation to a structured array of fields
"""
@@ -542,7 +542,7 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
@cython.wraparound(False)
@cython.boundscheck(False)
-def get_timedelta_field(ndarray[int64_t] tdindex, object field):
+def get_timedelta_field(int64_t[:] tdindex, object field):
"""
Given a int64-based timedelta index, extract the days, hrs, sec.,
field and return an array of these values.
diff --git a/pandas/_libs/tslibs/timedeltas.pxd b/pandas/_libs/tslibs/timedeltas.pxd
index eda4418902513..c02a840281266 100644
--- a/pandas/_libs/tslibs/timedeltas.pxd
+++ b/pandas/_libs/tslibs/timedeltas.pxd
@@ -3,8 +3,6 @@
from numpy cimport int64_t
# Exposed for tslib, not intended for outside use.
-cdef parse_timedelta_string(object ts)
-cpdef int64_t cast_from_unit(object ts, object unit) except? -1
+cdef int64_t cast_from_unit(object ts, object unit) except? -1
cpdef int64_t delta_to_nanoseconds(delta) except? -1
cpdef convert_to_timedelta64(object ts, object unit)
-cpdef array_to_timedelta64(object[:] values, unit=*, errors=*)
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 9c8be1901d1dc..d4e0e7f8ad72d 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -119,8 +119,6 @@ def ints_to_pytimedelta(int64_t[:] arr, box=False):
# ----------------------------------------------------------------------
cpdef int64_t delta_to_nanoseconds(delta) except? -1:
- if util.is_array(delta):
- return delta.astype('m8[ns]').astype('int64')
if hasattr(delta, 'nanos'):
return delta.nanos
if hasattr(delta, 'delta'):
@@ -129,10 +127,12 @@ cpdef int64_t delta_to_nanoseconds(delta) except? -1:
return delta.astype("timedelta64[ns]").item()
if is_integer_object(delta):
return delta
+ if PyDelta_Check(delta):
+ return (delta.days * 24 * 60 * 60 * 1000000 +
+ delta.seconds * 1000000 +
+ delta.microseconds) * 1000
- return (delta.days * 24 * 60 * 60 * 1000000 +
- delta.seconds * 1000000 +
- delta.microseconds) * 1000
+ raise TypeError(type(delta))
cpdef convert_to_timedelta64(object ts, object unit):
@@ -198,7 +198,7 @@ cpdef convert_to_timedelta64(object ts, object unit):
return ts.astype('timedelta64[ns]')
-cpdef array_to_timedelta64(object[:] values, unit='ns', errors='raise'):
+def array_to_timedelta64(object[:] values, unit='ns', errors='raise'):
"""
Convert an ndarray to an array of timedeltas. If errors == 'coerce',
coerce non-convertible objects to NaT. Otherwise, raise.
@@ -235,7 +235,7 @@ cpdef array_to_timedelta64(object[:] values, unit='ns', errors='raise'):
return iresult.base # .base to access underlying np.ndarray
-cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1:
+cdef inline int64_t cast_from_unit(object ts, object unit) except? -1:
""" return a casting of the unit represented to nanoseconds
round the fractional part of a float to our precision, p """
cdef:
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx
index b7e4de81da35c..1fc1347c8b9e3 100644
--- a/pandas/_libs/tslibs/timezones.pyx
+++ b/pandas/_libs/tslibs/timezones.pyx
@@ -322,7 +322,7 @@ cpdef bint tz_compare(object start, object end):
return get_timezone(start) == get_timezone(end)
-cpdef tz_standardize(object tz):
+def tz_standardize(tz: object):
"""
If the passed tz is a pytz timezone object, "normalize" it to the a
consistent version
diff --git a/pandas/tests/tslibs/test_timedeltas.py b/pandas/tests/tslibs/test_timedeltas.py
new file mode 100644
index 0000000000000..939c2b828a75f
--- /dev/null
+++ b/pandas/tests/tslibs/test_timedeltas.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds
+
+
+def test_delta_to_nanoseconds():
+ obj = np.timedelta64(14, 'D')
+ result = delta_to_nanoseconds(obj)
+ assert result == 14 * 24 * 3600 * 1e9
+
+ obj = pd.Timedelta(minutes=-7)
+ result = delta_to_nanoseconds(obj)
+ assert result == -7 * 60 * 1e9
+
+ obj = pd.Timedelta(minutes=-7).to_pytimedelta()
+ result = delta_to_nanoseconds(obj)
+ assert result == -7 * 60 * 1e9
+
+ obj = pd.offsets.Nano(125)
+ result = delta_to_nanoseconds(obj)
+ assert result == 125
+
+ obj = 1
+ result = delta_to_nanoseconds(obj)
+ assert obj == 1
+
+ obj = np.int64(2)
+ result = delta_to_nanoseconds(obj)
+ assert obj == 2
+
+ obj = np.int32(3)
+ result = delta_to_nanoseconds(obj)
+ assert result == 3
+
+ obj = np.array([123456789], dtype='m8[ns]')
+ with pytest.raises(TypeError):
+ delta_to_nanoseconds(obj)
| Some minor cython cleanup along the way | https://api.github.com/repos/pandas-dev/pandas/pulls/23302 | 2018-10-23T17:55:15Z | 2018-10-26T00:22:32Z | 2018-10-26T00:22:32Z | 2018-10-26T00:25:24Z |
DOC: Added Examples for Series max | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 31b700abcfdb3..0dcd46fc11475 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9362,7 +9362,7 @@ def compound(self, axis=None, skipna=None, level=None):
"""This method returns the maximum of the values in the object.
If you want the *index* of the maximum, use ``idxmax``. This is
the equivalent of the ``numpy.ndarray`` method ``argmax``.""",
- nanops.nanmax)
+ nanops.nanmax, _max_examples)
cls.min = _make_stat_function(
cls, 'min', name, name2, axis_descr,
"""This method returns the minimum of the values in the object.
@@ -10210,6 +10210,44 @@ def _doc_parms(cls):
nan
"""
+_max_examples = """\
+Examples
+--------
+``MultiIndex`` series example of monthly rainfall
+
+>>> index = pd.MultiIndex.from_product(
+... [['London', 'New York'], ['Jun', 'Jul', 'Aug']],
+... names=['city', 'month'])
+>>> s = pd.Series([47, 35, 54, 112, 117, 113], index=index)
+>>> s
+city month
+London Jun 47
+ Jul 35
+ Aug 54
+New York Jun 112
+ Jul 117
+ Aug 113
+dtype: int64
+
+>>> s.max()
+117
+
+Max using level names, as well as indices
+
+>>> s.max(level='city')
+city
+London 54
+New York 117
+dtype: int64
+
+>>> s.max(level=1)
+month
+Jun 112
+Jul 117
+Aug 113
+dtype: int64
+"""
+
_min_count_stub = """\
min_count : int, default 0
@@ -10247,9 +10285,10 @@ def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None,
return set_function_name(stat_func, name, cls)
-def _make_stat_function(cls, name, name1, name2, axis_descr, desc, f):
+def _make_stat_function(cls, name, name1, name2, axis_descr, desc, f,
+ examples=''):
@Substitution(outname=name, desc=desc, name1=name1, name2=name2,
- axis_descr=axis_descr, min_count='', examples='')
+ axis_descr=axis_descr, min_count='', examples=examples)
@Appender(_num_doc)
def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
| - [X] tests passed locally (to the same extent as master)
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` | https://api.github.com/repos/pandas-dev/pandas/pulls/23298 | 2018-10-23T15:05:03Z | 2018-10-25T16:54:24Z | 2018-10-25T16:54:24Z | 2018-10-26T09:47:48Z |
Categorical take fill value | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 7a10e8d1073d0..a41b0c9521f99 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -973,6 +973,7 @@ Categorical
- Bug in :meth:`Categorical.sort_values` where ``NaN`` values were always positioned in front regardless of ``na_position`` value. (:issue:`22556`).
- Bug when indexing with a boolean-valued ``Categorical``. Now a boolean-valued ``Categorical`` is treated as a boolean mask (:issue:`22665`)
- Constructing a :class:`CategoricalIndex` with empty values and boolean categories was raising a ``ValueError`` after a change to dtype coercion (:issue:`22702`).
+- Bug in :meth:`Categorical.take` with a user-provided ``fill_value`` not encoding the ``fill_value``, which could result in a ``ValueError``, incorrect results, or a segmentation fault (:issue:`23296`).
Datetimelike
^^^^^^^^^^^^
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 8735284617f31..1bc0d18bead83 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1768,8 +1768,10 @@ def take_nd(self, indexer, allow_fill=None, fill_value=None):
Parameters
----------
- indexer : sequence of integers
- allow_fill : bool, default None.
+ indexer : sequence of int
+ The indices in `self` to take. The meaning of negative values in
+ `indexer` depends on the value of `allow_fill`.
+ allow_fill : bool, default None
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
@@ -1786,11 +1788,52 @@ def take_nd(self, indexer, allow_fill=None, fill_value=None):
default is ``True``. In the future, this will change to
``False``.
+ fill_value : object
+ The value to use for `indices` that are missing (-1), when
+ ``allow_fill=True``. This should be the category, i.e. a value
+ in ``self.categories``, not a code.
+
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
+
+ See Also
+ --------
+ Series.take : Similar method for Series.
+ numpy.ndarray.take : Similar method for NumPy arrays.
+
+ Examples
+ --------
+ >>> cat = pd.Categorical(['a', 'a', 'b'])
+ >>> cat
+ [a, a, b]
+ Categories (2, object): [a, b]
+
+ Specify ``allow_fill==False`` to have negative indices mean indexing
+ from the right.
+
+ >>> cat.take([0, -1, -2], allow_fill=False)
+ [a, b, a]
+ Categories (2, object): [a, b]
+
+ With ``allow_fill=True``, indices equal to ``-1`` mean "missing"
+ values that should be filled with the `fill_value`, which is
+ ``np.nan`` by default.
+
+ >>> cat.take([0, -1, -1], allow_fill=True)
+ [a, NaN, NaN]
+ Categories (2, object): [a, b]
+
+ The fill value can be specified.
+
+ >>> cat.take([0, -1, -1], allow_fill=True, fill_value='a')
+ [a, a, a]
+ Categories (3, object): [a, b]
+
+ Specifying a fill value that's not in ``self.categories``
+ will raise a ``TypeError``.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
@@ -1798,14 +1841,26 @@ def take_nd(self, indexer, allow_fill=None, fill_value=None):
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
+ dtype = self.dtype
+
if isna(fill_value):
- # For categorical, any NA value is considered a user-facing
- # NA value. Our storage NA value is -1.
fill_value = -1
+ elif allow_fill:
+ # convert user-provided `fill_value` to codes
+ if fill_value in self.categories:
+ fill_value = self.categories.get_loc(fill_value)
+ else:
+ msg = (
+ "'fill_value' ('{}') is not in this Categorical's "
+ "categories."
+ )
+ raise TypeError(msg.format(fill_value))
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
- result = self._constructor(codes, dtype=self.dtype, fastpath=True)
+ result = type(self).from_codes(codes,
+ categories=dtype.categories,
+ ordered=dtype.ordered)
return result
take = take_nd
diff --git a/pandas/tests/arrays/categorical/test_algos.py b/pandas/tests/arrays/categorical/test_algos.py
index dcf2081ae32fe..d4a70e9a1ec2e 100644
--- a/pandas/tests/arrays/categorical/test_algos.py
+++ b/pandas/tests/arrays/categorical/test_algos.py
@@ -111,3 +111,32 @@ def test_positional_take_unobserved(self, ordered):
expected = pd.Categorical(['b', 'a'], categories=cat.categories,
ordered=ordered)
tm.assert_categorical_equal(result, expected)
+
+ def test_take_allow_fill(self):
+ # https://github.com/pandas-dev/pandas/issues/23296
+ cat = pd.Categorical(['a', 'a', 'b'])
+ result = cat.take([0, -1, -1], allow_fill=True)
+ expected = pd.Categorical(['a', np.nan, np.nan],
+ categories=['a', 'b'])
+ tm.assert_categorical_equal(result, expected)
+
+ def test_take_fill_with_negative_one(self):
+ # -1 was a category
+ cat = pd.Categorical([-1, 0, 1])
+ result = cat.take([0, -1, 1], allow_fill=True, fill_value=-1)
+ expected = pd.Categorical([-1, -1, 0], categories=[-1, 0, 1])
+ tm.assert_categorical_equal(result, expected)
+
+ def test_take_fill_value(self):
+ # https://github.com/pandas-dev/pandas/issues/23296
+ cat = pd.Categorical(['a', 'b', 'c'])
+ result = cat.take([0, 1, -1], fill_value='a', allow_fill=True)
+ expected = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
+ tm.assert_categorical_equal(result, expected)
+
+ def test_take_fill_value_new_raises(self):
+ # https://github.com/pandas-dev/pandas/issues/23296
+ cat = pd.Categorical(['a', 'b', 'c'])
+ xpr = r"'fill_value' \('d'\) is not in this Categorical's categories."
+ with tm.assert_raises_regex(TypeError, xpr):
+ cat.take([0, 1, -1], fill_value='d', allow_fill=True)
| Closes https://github.com/pandas-dev/pandas/issues/23296 | https://api.github.com/repos/pandas-dev/pandas/pulls/23297 | 2018-10-23T14:04:23Z | 2018-10-23T19:52:36Z | 2018-10-23T19:52:36Z | 2018-10-23T19:52:39Z |
TST: stricter monotonicity/uniqueness tests (part 2) | diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 49a247608ab0b..ee91b3075b0a1 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -353,8 +353,8 @@ def test_has_duplicates(self, indices):
pytest.skip('Skip check for empty Index and MultiIndex')
idx = self._holder([indices[0]] * 5)
- assert not idx.is_unique
- assert idx.has_duplicates
+ assert idx.is_unique is False
+ assert idx.has_duplicates is True
@pytest.mark.parametrize('keep', ['first', 'last', False])
def test_duplicated(self, indices, keep):
@@ -414,7 +414,7 @@ def test_get_unique_index(self, indices):
# We test against `idx_unique`, so first we make sure it's unique
# and doesn't contain nans.
- assert idx_unique.is_unique
+ assert idx_unique.is_unique is True
try:
assert not idx_unique.hasnans
except NotImplementedError:
@@ -438,7 +438,7 @@ def test_get_unique_index(self, indices):
vals_unique = vals[:2]
idx_nan = indices._shallow_copy(vals)
idx_unique_nan = indices._shallow_copy(vals_unique)
- assert idx_unique_nan.is_unique
+ assert idx_unique_nan.is_unique is True
assert idx_nan.dtype == indices.dtype
assert idx_unique_nan.dtype == indices.dtype
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index 0ff5ab232d670..f33106e61662f 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -243,108 +243,108 @@ def test_unique(self, closed):
# unique non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
- assert idx.is_unique
+ assert idx.is_unique is True
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
- assert idx.is_unique
+ assert idx.is_unique is True
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
- assert idx.is_unique
+ assert idx.is_unique is True
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
- assert idx.is_unique
+ assert idx.is_unique is True
# duplicate
idx = IntervalIndex.from_tuples(
[(0, 1), (0, 1), (2, 3)], closed=closed)
- assert not idx.is_unique
+ assert idx.is_unique is False
# empty
idx = IntervalIndex([], closed=closed)
- assert idx.is_unique
+ assert idx.is_unique is True
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
- assert idx.is_monotonic
- assert idx._is_strictly_monotonic_increasing
- assert not idx.is_monotonic_decreasing
- assert not idx._is_strictly_monotonic_decreasing
+ assert idx.is_monotonic is True
+ assert idx._is_strictly_monotonic_increasing is True
+ assert idx.is_monotonic_decreasing is False
+ assert idx._is_strictly_monotonic_decreasing is False
# decreasing non-overlapping
idx = IntervalIndex.from_tuples(
[(4, 5), (2, 3), (1, 2)], closed=closed)
- assert not idx.is_monotonic
- assert not idx._is_strictly_monotonic_increasing
- assert idx.is_monotonic_decreasing
- assert idx._is_strictly_monotonic_decreasing
+ assert idx.is_monotonic is False
+ assert idx._is_strictly_monotonic_increasing is False
+ assert idx.is_monotonic_decreasing is True
+ assert idx._is_strictly_monotonic_decreasing is True
# unordered non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (4, 5), (2, 3)], closed=closed)
- assert not idx.is_monotonic
- assert not idx._is_strictly_monotonic_increasing
- assert not idx.is_monotonic_decreasing
- assert not idx._is_strictly_monotonic_decreasing
+ assert idx.is_monotonic is False
+ assert idx._is_strictly_monotonic_increasing is False
+ assert idx.is_monotonic_decreasing is False
+ assert idx._is_strictly_monotonic_decreasing is False
# increasing overlapping
idx = IntervalIndex.from_tuples(
[(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
- assert idx.is_monotonic
- assert idx._is_strictly_monotonic_increasing
- assert not idx.is_monotonic_decreasing
- assert not idx._is_strictly_monotonic_decreasing
+ assert idx.is_monotonic is True
+ assert idx._is_strictly_monotonic_increasing is True
+ assert idx.is_monotonic_decreasing is False
+ assert idx._is_strictly_monotonic_decreasing is False
# decreasing overlapping
idx = IntervalIndex.from_tuples(
[(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
- assert not idx.is_monotonic
- assert not idx._is_strictly_monotonic_increasing
- assert idx.is_monotonic_decreasing
- assert idx._is_strictly_monotonic_decreasing
+ assert idx.is_monotonic is False
+ assert idx._is_strictly_monotonic_increasing is False
+ assert idx.is_monotonic_decreasing is True
+ assert idx._is_strictly_monotonic_decreasing is True
# unordered overlapping
idx = IntervalIndex.from_tuples(
[(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
- assert not idx.is_monotonic
- assert not idx._is_strictly_monotonic_increasing
- assert not idx.is_monotonic_decreasing
- assert not idx._is_strictly_monotonic_decreasing
+ assert idx.is_monotonic is False
+ assert idx._is_strictly_monotonic_increasing is False
+ assert idx.is_monotonic_decreasing is False
+ assert idx._is_strictly_monotonic_decreasing is False
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
- assert idx.is_monotonic
- assert idx._is_strictly_monotonic_increasing
- assert not idx.is_monotonic_decreasing
- assert not idx._is_strictly_monotonic_decreasing
+ assert idx.is_monotonic is True
+ assert idx._is_strictly_monotonic_increasing is True
+ assert idx.is_monotonic_decreasing is False
+ assert idx._is_strictly_monotonic_decreasing is False
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(2, 3), (1, 3), (1, 2)], closed=closed)
- assert not idx.is_monotonic
- assert not idx._is_strictly_monotonic_increasing
- assert idx.is_monotonic_decreasing
- assert idx._is_strictly_monotonic_decreasing
+ assert idx.is_monotonic is False
+ assert idx._is_strictly_monotonic_increasing is False
+ assert idx.is_monotonic_decreasing is True
+ assert idx._is_strictly_monotonic_decreasing is True
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
- assert idx.is_monotonic
- assert not idx._is_strictly_monotonic_increasing
- assert idx.is_monotonic_decreasing
- assert not idx._is_strictly_monotonic_decreasing
+ assert idx.is_monotonic is True
+ assert idx._is_strictly_monotonic_increasing is False
+ assert idx.is_monotonic_decreasing is True
+ assert idx._is_strictly_monotonic_decreasing is False
# empty
idx = IntervalIndex([], closed=closed)
- assert idx.is_monotonic
- assert idx._is_strictly_monotonic_increasing
- assert idx.is_monotonic_decreasing
- assert idx._is_strictly_monotonic_decreasing
+ assert idx.is_monotonic is True
+ assert idx._is_strictly_monotonic_increasing is True
+ assert idx.is_monotonic_decreasing is True
+ assert idx._is_strictly_monotonic_decreasing is True
@pytest.mark.skip(reason='not a valid repr as we use interval notation')
def test_repr(self):
diff --git a/pandas/tests/indexes/multi/test_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py
index 54a12137c9457..dfc9f329e0753 100644
--- a/pandas/tests/indexes/multi/test_duplicates.py
+++ b/pandas/tests/indexes/multi/test_duplicates.py
@@ -131,16 +131,16 @@ def test_duplicate_meta_data():
def test_has_duplicates(idx, idx_dup):
# see fixtures
- assert idx.is_unique
- assert not idx.has_duplicates
- assert not idx_dup.is_unique
- assert idx_dup.has_duplicates
+ assert idx.is_unique is True
+ assert idx.has_duplicates is False
+ assert idx_dup.is_unique is False
+ assert idx_dup.has_duplicates is True
mi = MultiIndex(levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
- assert not mi.is_unique
- assert mi.has_duplicates
+ assert mi.is_unique is False
+ assert mi.has_duplicates is True
def test_has_duplicates_from_tuples():
diff --git a/pandas/tests/indexes/multi/test_integrity.py b/pandas/tests/indexes/multi/test_integrity.py
index 7a8f8b60d31ba..a2401035c80f8 100644
--- a/pandas/tests/indexes/multi/test_integrity.py
+++ b/pandas/tests/indexes/multi/test_integrity.py
@@ -110,7 +110,7 @@ def test_consistency():
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
- assert not index.is_unique
+ assert index.is_unique is False
def test_hash_collisions():
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 6b5ba373eb10b..4a597967d3d5d 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -676,7 +676,7 @@ def gen_expected(df, mask):
df.take(mask[1:])])
df = gen_test(900, 100)
- assert not df.index.is_unique
+ assert df.index.is_unique is False
mask = np.arange(100)
result = df.loc[mask]
@@ -684,7 +684,7 @@ def gen_expected(df, mask):
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
- assert not df.index.is_unique
+ assert df.index.is_unique is False
mask = np.arange(100000)
result = df.loc[mask]
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index aa4f58089a933..6e491cbb8ba79 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -387,7 +387,7 @@ def test_set_value(test_data):
def test_setslice(test_data):
sl = test_data.ts[5:20]
assert len(sl) == len(sl.index)
- assert sl.index.is_unique
+ assert sl.index.is_unique is True
# FutureWarning from NumPy about [slice(None, 5).
diff --git a/pandas/tests/series/test_duplicates.py b/pandas/tests/series/test_duplicates.py
index 2e4d64188307c..735ecd3917a1b 100644
--- a/pandas/tests/series/test_duplicates.py
+++ b/pandas/tests/series/test_duplicates.py
@@ -63,9 +63,9 @@ def test_unique_data_ownership():
def test_is_unique():
# GH11946
s = Series(np.random.randint(0, 10, size=1000))
- assert not s.is_unique
+ assert s.is_unique is False
s = Series(np.arange(1000))
- assert s.is_unique
+ assert s.is_unique is True
def test_is_unique_class_ne(capsys):
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 557669260604a..d491df587fb4a 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -1109,7 +1109,7 @@ def test_datetime_likes(self):
def test_unique_index(self):
cases = [Index([1, 2, 3]), pd.RangeIndex(0, 3)]
for case in cases:
- assert case.is_unique
+ assert case.is_unique is True
tm.assert_numpy_array_equal(case.duplicated(),
np.array([False, False, False]))
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 0dbbe60283cac..eeff3cb0199ec 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -495,7 +495,7 @@ def test_xs_partial(self):
def test_xs_with_duplicates(self):
# Issue #13719
df_dup = concat([self.frame] * 2)
- assert not df_dup.index.is_unique
+ assert df_dup.index.is_unique is False
expected = concat([self.frame.xs('one', level='second')] * 2)
tm.assert_frame_equal(df_dup.xs('one', level='second'), expected)
tm.assert_frame_equal(df_dup.xs(['one'], level=['second']), expected)
@@ -889,7 +889,7 @@ def test_stack(self):
# GH10417
def check(left, right):
tm.assert_series_equal(left, right)
- assert not left.index.is_unique
+ assert left.index.is_unique is False
li, ri = left.index, right.index
tm.assert_index_equal(li, ri)
@@ -1922,7 +1922,7 @@ def test_drop_level_nonunique_datetime(self):
df['tstamp'] = idxdt
df = df.set_index('tstamp', append=True)
ts = Timestamp('201603231600')
- assert not df.index.is_unique
+ assert df.index.is_unique is False
result = df.drop(ts, level='tstamp')
expected = df.loc[idx != 4]
diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py
index b62260071d996..403fed418cae2 100644
--- a/pandas/tests/util/test_hashing.py
+++ b/pandas/tests/util/test_hashing.py
@@ -110,9 +110,9 @@ def test_hash_tuples_err(self, val):
def test_multiindex_unique(self):
mi = MultiIndex.from_tuples([(118, 472), (236, 118),
(51, 204), (102, 51)])
- assert mi.is_unique
+ assert mi.is_unique is True
result = hash_pandas_object(mi)
- assert result.is_unique
+ assert result.is_unique is True
def test_multiindex_objects(self):
mi = MultiIndex(levels=[['b', 'd', 'a'], [1, 2, 3]],
| - [x] xref #23256
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Continuation of #23256.
By making the uniqueness tests stricter (i.e. testing for actual True/False rather than truthy/Falsy values) I think we get better ensurance that some PR doesn't accidentally turn a property into a method or that the property doesn't accidentally returns a non-boolean return value. | https://api.github.com/repos/pandas-dev/pandas/pulls/23294 | 2018-10-23T10:06:12Z | 2018-10-24T12:34:51Z | 2018-10-24T12:34:51Z | 2018-10-27T08:15:45Z |
Add __array_ufunc__ to Series / Array | diff --git a/doc/source/development/extending.rst b/doc/source/development/extending.rst
index 363ec10d58bb6..12af80f1bce80 100644
--- a/doc/source/development/extending.rst
+++ b/doc/source/development/extending.rst
@@ -208,6 +208,25 @@ will
2. call ``result = op(values, ExtensionArray)``
3. re-box the result in a ``Series``
+.. _extending.extension.ufunc:
+
+NumPy Universal Functions
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+:class:`Series` implements ``__array_ufunc__``. As part of the implementation,
+pandas unboxes the ``ExtensionArray`` from the :class:`Series`, applies the ufunc,
+and re-boxes it if necessary.
+
+If applicable, we highly recommend that you implement ``__array_ufunc__`` in your
+extension array to avoid coercion to an ndarray. See
+`the numpy documentation <https://docs.scipy.org/doc/numpy/reference/generated/numpy.lib.mixins.NDArrayOperatorsMixin.html>`__
+for an example.
+
+As part of your implementation, we require that you defer to pandas when a pandas
+container (:class:`Series`, :class:`DataFrame`, :class:`Index`) is detected in ``inputs``.
+If any of those is present, you should return ``NotImplemented``. Pandas will take care of
+unboxing the array from the container and re-calling the ufunc with the unwrapped input.
+
.. _extending.extension.testing:
Testing extension arrays
diff --git a/doc/source/getting_started/dsintro.rst b/doc/source/getting_started/dsintro.rst
index 914c55115567a..33e5d390447d7 100644
--- a/doc/source/getting_started/dsintro.rst
+++ b/doc/source/getting_started/dsintro.rst
@@ -731,28 +731,62 @@ DataFrame interoperability with NumPy functions
.. _dsintro.numpy_interop:
Elementwise NumPy ufuncs (log, exp, sqrt, ...) and various other NumPy functions
-can be used with no issues on DataFrame, assuming the data within are numeric:
+can be used with no issues on Series and DataFrame, assuming the data within
+are numeric:
.. ipython:: python
np.exp(df)
np.asarray(df)
-The dot method on DataFrame implements matrix multiplication:
+DataFrame is not intended to be a drop-in replacement for ndarray as its
+indexing semantics and data model are quite different in places from an n-dimensional
+array.
+
+:class:`Series` implements ``__array_ufunc__``, which allows it to work with NumPy's
+`universal functions <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_.
+
+The ufunc is applied to the underlying array in a Series.
.. ipython:: python
- df.T.dot(df)
+ ser = pd.Series([1, 2, 3, 4])
+ np.exp(ser)
-Similarly, the dot method on Series implements dot product:
+Like other parts of the library, pandas will automatically align labeled inputs
+as part of a ufunc with multiple inputs. For example, using :meth:`numpy.remainder`
+on two :class:`Series` with differently ordered labels will align before the operation.
.. ipython:: python
- s1 = pd.Series(np.arange(5, 10))
- s1.dot(s1)
+ ser1 = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
+ ser2 = pd.Series([1, 3, 5], index=['b', 'a', 'c'])
+ ser1
+ ser2
+ np.remainder(ser1, ser2)
-DataFrame is not intended to be a drop-in replacement for ndarray as its
-indexing semantics are quite different in places from a matrix.
+As usual, the union of the two indices is taken, and non-overlapping values are filled
+with missing values.
+
+.. ipython:: python
+
+ ser3 = pd.Series([2, 4, 6], index=['b', 'c', 'd'])
+ ser3
+ np.remainder(ser1, ser3)
+
+When a binary ufunc is applied to a :class:`Series` and :class:`Index`, the Series
+implementation takes precedence and a Series is returned.
+
+.. ipython:: python
+
+ ser = pd.Series([1, 2, 3])
+ idx = pd.Index([4, 5, 6])
+
+ np.maximum(ser, idx)
+
+NumPy ufuncs are safe to apply to :class:`Series` backed by non-ndarray arrays,
+for example :class:`SparseArray` (see :ref:`sparse.calculation`). If possible,
+the ufunc is applied without converting the underlying data to an ndarray.
Console display
~~~~~~~~~~~~~~~
diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index a2f93dcf337d7..4f44fcaab63d4 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -5,6 +5,7 @@
Computational tools
===================
+
Statistical functions
---------------------
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 30ae4ebe21ca4..8850ee79a893b 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -886,6 +886,7 @@ Sparse
- Introduce a better error message in :meth:`Series.sparse.from_coo` so it returns a ``TypeError`` for inputs that are not coo matrices (:issue:`26554`)
- Bug in :func:`numpy.modf` on a :class:`SparseArray`. Now a tuple of :class:`SparseArray` is returned (:issue:`26946`).
+
Build Changes
^^^^^^^^^^^^^
@@ -896,6 +897,7 @@ ExtensionArray
- Bug in :func:`factorize` when passing an ``ExtensionArray`` with a custom ``na_sentinel`` (:issue:`25696`).
- :meth:`Series.count` miscounts NA values in ExtensionArrays (:issue:`26835`)
+- Added ``Series.__array_ufunc__`` to better handle NumPy ufuncs applied to Series backed by extension arrays (:issue:`23293`).
- Keyword argument ``deep`` has been removed from :meth:`ExtensionArray.copy` (:issue:`27083`)
Other
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 6340cc732d6c1..0762a607f20ae 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -107,6 +107,17 @@ class ExtensionArray:
attributes called ``.values`` or ``._values`` to ensure full compatibility
with pandas internals. But other names as ``.data``, ``._data``,
``._items``, ... can be freely used.
+
+ If implementing NumPy's ``__array_ufunc__`` interface, pandas expects
+ that
+
+ 1. You defer by raising ``NotImplemented`` when any Series are present
+ in `inputs`. Pandas will extract the arrays and call the ufunc again.
+ 2. You define a ``_HANDLED_TYPES`` tuple as an attribute on the class.
+ Pandas inspect this to determine whether the ufunc is valid for the
+ types present.
+
+ See :ref:`extending.extension.ufunc` for more.
"""
# '_typ' is for pandas.core.dtypes.generic.ABCExtensionArray.
# Don't override this.
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 68c7b79becb55..b77a4f985067d 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -26,6 +26,7 @@
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
+from pandas.core import ops
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algorithms
from pandas.core.algorithms import factorize, take, take_1d, unique1d
@@ -1292,6 +1293,20 @@ def __array__(self, dtype=None):
ret = np.asarray(ret)
return ret
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ # for binary ops, use our custom dunder methods
+ result = ops.maybe_dispatch_ufunc_to_dunder_op(
+ self, ufunc, method, *inputs, **kwargs)
+ if result is not NotImplemented:
+ return result
+
+ # for all other cases, raise for now (similarly as what happens in
+ # Series.__array_prepare__)
+ raise TypeError("Object with dtype {dtype} cannot perform "
+ "the numpy op {op}".format(
+ dtype=self.dtype,
+ op=ufunc.__name__))
+
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 88de497a3329f..644c2f634240f 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -1,3 +1,4 @@
+import numbers
import sys
from typing import Type
import warnings
@@ -17,7 +18,7 @@
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna, notna
-from pandas.core import nanops
+from pandas.core import nanops, ops
from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
from pandas.core.tools.numeric import to_numeric
@@ -344,6 +345,52 @@ def __array__(self, dtype=None):
"""
return self._coerce_to_ndarray()
+ _HANDLED_TYPES = (np.ndarray, numbers.Number)
+
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ # For IntegerArray inputs, we apply the ufunc to ._data
+ # and mask the result.
+ if method == 'reduce':
+ # Not clear how to handle missing values in reductions. Raise.
+ raise NotImplementedError("The 'reduce' method is not supported.")
+ out = kwargs.get('out', ())
+
+ for x in inputs + out:
+ if not isinstance(x, self._HANDLED_TYPES + (IntegerArray,)):
+ return NotImplemented
+
+ # for binary ops, use our custom dunder methods
+ result = ops.maybe_dispatch_ufunc_to_dunder_op(
+ self, ufunc, method, *inputs, **kwargs)
+ if result is not NotImplemented:
+ return result
+
+ mask = np.zeros(len(self), dtype=bool)
+ inputs2 = []
+ for x in inputs:
+ if isinstance(x, IntegerArray):
+ mask |= x._mask
+ inputs2.append(x._data)
+ else:
+ inputs2.append(x)
+
+ def reconstruct(x):
+ # we don't worry about scalar `x` here, since we
+ # raise for reduce up above.
+
+ if is_integer_dtype(x.dtype):
+ m = mask.copy()
+ return IntegerArray(x, m)
+ else:
+ x[mask] = np.nan
+ return x
+
+ result = getattr(ufunc, method)(*inputs2, **kwargs)
+ if isinstance(result, tuple):
+ tuple(reconstruct(x) for x in result)
+ else:
+ return reconstruct(result)
+
def __iter__(self):
for i in range(len(self)):
if self._mask[i]:
diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py
index 97ab6ec8235ef..29cc899fa6a9b 100644
--- a/pandas/core/arrays/sparse.py
+++ b/pandas/core/arrays/sparse.py
@@ -38,6 +38,7 @@
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
+import pandas.core.ops as ops
import pandas.io.formats.printing as printing
@@ -1665,42 +1666,11 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
if not isinstance(x, self._HANDLED_TYPES + (SparseArray,)):
return NotImplemented
- special = {'add', 'sub', 'mul', 'pow', 'mod', 'floordiv', 'truediv',
- 'divmod', 'eq', 'ne', 'lt', 'gt', 'le', 'ge', 'remainder'}
- aliases = {
- 'subtract': 'sub',
- 'multiply': 'mul',
- 'floor_divide': 'floordiv',
- 'true_divide': 'truediv',
- 'power': 'pow',
- 'remainder': 'mod',
- 'divide': 'div',
- 'equal': 'eq',
- 'not_equal': 'ne',
- 'less': 'lt',
- 'less_equal': 'le',
- 'greater': 'gt',
- 'greater_equal': 'ge',
- }
-
- flipped = {
- 'lt': '__gt__',
- 'le': '__ge__',
- 'gt': '__lt__',
- 'ge': '__le__',
- 'eq': '__eq__',
- 'ne': '__ne__',
- }
-
- op_name = ufunc.__name__
- op_name = aliases.get(op_name, op_name)
-
- if op_name in special and kwargs.get('out') is None:
- if isinstance(inputs[0], type(self)):
- return getattr(self, '__{}__'.format(op_name))(inputs[1])
- else:
- name = flipped.get(op_name, '__r{}__'.format(op_name))
- return getattr(self, name)(inputs[0])
+ # for binary ops, use our custom dunder methods
+ result = ops.maybe_dispatch_ufunc_to_dunder_op(
+ self, ufunc, method, *inputs, **kwargs)
+ if result is not NotImplemented:
+ return result
if len(inputs) == 1:
# No alignment necessary.
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index a4d31cb227f19..5dd8455073212 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -6,7 +6,7 @@
import datetime
import operator
import textwrap
-from typing import Dict, Optional
+from typing import Any, Callable, Dict, Optional
import warnings
import numpy as np
@@ -29,6 +29,7 @@
from pandas.core.dtypes.missing import isna, notna
import pandas as pd
+from pandas._typing import ArrayLike
import pandas.core.common as com
import pandas.core.missing as missing
@@ -1660,7 +1661,14 @@ def na_op(x, y):
lambda val: op(val, y))
raise
- result = missing.fill_zeros(result, x, y, op_name, fill_zeros)
+ if isinstance(result, tuple):
+ # e.g. divmod
+ result = tuple(
+ missing.fill_zeros(r, x, y, op_name, fill_zeros)
+ for r in result
+ )
+ else:
+ result = missing.fill_zeros(result, x, y, op_name, fill_zeros)
return result
def wrapper(left, right):
@@ -2349,3 +2357,78 @@ def wrapper(self, other):
wrapper.__name__ = op_name
return wrapper
+
+
+def maybe_dispatch_ufunc_to_dunder_op(
+ self: ArrayLike,
+ ufunc: Callable,
+ method: str,
+ *inputs: ArrayLike,
+ **kwargs: Any
+):
+ """
+ Dispatch a ufunc to the equivalent dunder method.
+
+ Parameters
+ ----------
+ self : ArrayLike
+ The array whose dunder method we dispatch to
+ ufunc : Callable
+ A NumPy ufunc
+ method : {'reduce', 'accumulate', 'reduceat', 'outer', 'at', '__call__'}
+ inputs : ArrayLike
+ The input arrays.
+ kwargs : Any
+ The additional keyword arguments, e.g. ``out``.
+
+ Returns
+ -------
+ result : Any
+ The result of applying the ufunc
+ """
+ # special has the ufuncs we dispatch to the dunder op on
+ special = {'add', 'sub', 'mul', 'pow', 'mod', 'floordiv', 'truediv',
+ 'divmod', 'eq', 'ne', 'lt', 'gt', 'le', 'ge', 'remainder',
+ 'matmul'}
+ aliases = {
+ 'subtract': 'sub',
+ 'multiply': 'mul',
+ 'floor_divide': 'floordiv',
+ 'true_divide': 'truediv',
+ 'power': 'pow',
+ 'remainder': 'mod',
+ 'divide': 'div',
+ 'equal': 'eq',
+ 'not_equal': 'ne',
+ 'less': 'lt',
+ 'less_equal': 'le',
+ 'greater': 'gt',
+ 'greater_equal': 'ge',
+ }
+
+ # For op(., Array) -> Array.__r{op}__
+ flipped = {
+ 'lt': '__gt__',
+ 'le': '__ge__',
+ 'gt': '__lt__',
+ 'ge': '__le__',
+ 'eq': '__eq__',
+ 'ne': '__ne__',
+ }
+
+ op_name = ufunc.__name__
+ op_name = aliases.get(op_name, op_name)
+
+ def not_implemented(*args, **kwargs):
+ return NotImplemented
+
+ if (method == '__call__' and op_name in special
+ and kwargs.get('out') is None):
+ if isinstance(inputs[0], type(self)):
+ name = '__{}__'.format(op_name)
+ return getattr(self, name, not_implemented)(inputs[1])
+ else:
+ name = flipped.get(op_name, '__r{}__'.format(op_name))
+ return getattr(self, name, not_implemented)(inputs[0])
+ else:
+ return NotImplemented
diff --git a/pandas/core/series.py b/pandas/core/series.py
index f415bc9fd3561..9179099562832 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -5,6 +5,7 @@
from io import StringIO
from shutil import get_terminal_size
from textwrap import dedent
+from typing import Any, Callable
import warnings
import numpy as np
@@ -714,6 +715,84 @@ def view(self, dtype=None):
# ----------------------------------------------------------------------
# NDArray Compat
+ _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray)
+
+ def __array_ufunc__(
+ self,
+ ufunc: Callable,
+ method: str,
+ *inputs: Any,
+ **kwargs: Any
+ ):
+ # TODO: handle DataFrame
+ from pandas.core.internals.construction import extract_array
+ cls = type(self)
+
+ # for binary ops, use our custom dunder methods
+ result = ops.maybe_dispatch_ufunc_to_dunder_op(
+ self, ufunc, method, *inputs, **kwargs)
+ if result is not NotImplemented:
+ return result
+
+ # Determine if we should defer.
+ no_defer = (np.ndarray.__array_ufunc__, cls.__array_ufunc__)
+
+ for item in inputs:
+ higher_priority = (
+ hasattr(item, '__array_priority__') and
+ item.__array_priority__ > self.__array_priority__
+ )
+ has_array_ufunc = (
+ hasattr(item, '__array_ufunc__') and
+ type(item).__array_ufunc__ not in no_defer and
+ not isinstance(item, self._HANDLED_TYPES)
+ )
+ if higher_priority or has_array_ufunc:
+ return NotImplemented
+
+ # align all the inputs.
+ names = [getattr(x, 'name') for x in inputs if hasattr(x, 'name')]
+ types = tuple(type(x) for x in inputs)
+ # TODO: dataframe
+ alignable = [x for x, t in zip(inputs, types) if issubclass(t, Series)]
+
+ if len(alignable) > 1:
+ # This triggers alignment.
+ # At the moment, there aren't any ufuncs with more than two inputs
+ # so this ends up just being x1.index | x2.index, but we write
+ # it to handle *args.
+ index = alignable[0].index
+ for s in alignable[1:]:
+ index |= s.index
+ inputs = tuple(x.reindex(index) if issubclass(t, Series) else x
+ for x, t in zip(inputs, types))
+ else:
+ index = self.index
+
+ inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs)
+ result = getattr(ufunc, method)(*inputs, **kwargs)
+ if len(set(names)) == 1:
+ # we require names to be hashable, right?
+ name = names[0] # type: Any
+ else:
+ name = None
+
+ def construct_return(result):
+ if lib.is_scalar(result):
+ return result
+ return self._constructor(result,
+ index=index,
+ name=name,
+ copy=False)
+
+ if type(result) is tuple:
+ # multiple return values
+ return tuple(construct_return(x) for x in result)
+ elif method == 'at':
+ # no return value
+ return None
+ else:
+ return construct_return(result)
def __array__(self, dtype=None):
"""
@@ -776,30 +855,6 @@ def __array__(self, dtype=None):
dtype = 'M8[ns]'
return np.asarray(self.array, dtype)
- def __array_wrap__(self, result, context=None):
- """
- Gets called after a ufunc.
- """
- return self._constructor(result, index=self.index,
- copy=False).__finalize__(self)
-
- def __array_prepare__(self, result, context=None):
- """
- Gets called prior to a ufunc.
- """
-
- # nice error message for non-ufunc types
- if (context is not None and
- (not isinstance(self._values, (np.ndarray, ExtensionArray))
- or isinstance(self._values, Categorical))):
- obj = context[1][0]
- raise TypeError("{obj} with dtype {dtype} cannot perform "
- "the numpy op {op}".format(
- obj=type(obj).__name__,
- dtype=getattr(obj, 'dtype', None),
- op=context[0].__name__))
- return result
-
# ----------------------------------------------------------------------
# Unary Methods
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py
index b1091d38c10d0..908e197ec1d28 100644
--- a/pandas/tests/arithmetic/test_datetime64.py
+++ b/pandas/tests/arithmetic/test_datetime64.py
@@ -670,6 +670,10 @@ def test_comparison_tzawareness_compat_scalars(self, op, box_with_array):
@pytest.mark.parametrize('other', [datetime(2016, 1, 1),
Timestamp('2016-01-01'),
np.datetime64('2016-01-01')])
+ # Bug in NumPy? https://github.com/numpy/numpy/issues/13841
+ # Raising in __eq__ will fall back to NumPy, which warns, fails,
+ # then re-raises the original exception. So we just need to ignore.
+ @pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
def test_scalar_comparison_tzawareness(self, op, other, tz_aware_fixture,
box_with_array):
tz = tz_aware_fixture
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py
index f58f8981317df..31c7f47bcf5bd 100644
--- a/pandas/tests/arithmetic/test_numeric.py
+++ b/pandas/tests/arithmetic/test_numeric.py
@@ -891,6 +891,25 @@ def test_ufunc_coercions(self, holder):
exp = tm.box_expected(exp, box)
tm.assert_equal(result, exp)
+ @pytest.mark.parametrize('holder', [pd.Int64Index, pd.UInt64Index,
+ pd.Float64Index, pd.Series])
+ def test_ufunc_multiple_return_values(self, holder):
+ obj = holder([1, 2, 3], name='x')
+ box = pd.Series if holder is pd.Series else pd.Index
+
+ result = np.modf(obj)
+ assert isinstance(result, tuple)
+ exp1 = pd.Float64Index([0., 0., 0.], name='x')
+ exp2 = pd.Float64Index([1., 2., 3.], name='x')
+ tm.assert_equal(result[0], tm.box_expected(exp1, box))
+ tm.assert_equal(result[1], tm.box_expected(exp2, box))
+
+ def test_ufunc_at(self):
+ s = pd.Series([0, 1, 2], index=[1, 2, 3], name='x')
+ np.add.at(s, [0, 2], 10)
+ expected = pd.Series([10, 1, 12], index=[1, 2, 3], name='x')
+ tm.assert_series_equal(s, expected)
+
class TestObjectDtypeEquivalence:
# Tests that arithmetic operations match operations executed elementwise
diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py
index 65f7628370ad4..fb62a90a6007e 100644
--- a/pandas/tests/arrays/test_integer.py
+++ b/pandas/tests/arrays/test_integer.py
@@ -717,6 +717,74 @@ def test_astype_nansafe():
arr.astype('uint32')
+@pytest.mark.parametrize(
+ 'ufunc', [np.abs, np.sign])
+def test_ufuncs_single_int(ufunc):
+ a = integer_array([1, 2, -3, np.nan])
+ result = ufunc(a)
+ expected = integer_array(ufunc(a.astype(float)))
+ tm.assert_extension_array_equal(result, expected)
+
+ s = pd.Series(a)
+ result = ufunc(s)
+ expected = pd.Series(integer_array(ufunc(a.astype(float))))
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ 'ufunc', [np.log, np.exp, np.sin, np.cos, np.sqrt])
+def test_ufuncs_single_float(ufunc):
+ a = integer_array([1, 2, -3, np.nan])
+ with np.errstate(invalid='ignore'):
+ result = ufunc(a)
+ expected = ufunc(a.astype(float))
+ tm.assert_numpy_array_equal(result, expected)
+
+ s = pd.Series(a)
+ with np.errstate(invalid='ignore'):
+ result = ufunc(s)
+ expected = ufunc(s.astype(float))
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ 'ufunc', [np.add, np.subtract])
+def test_ufuncs_binary_int(ufunc):
+ # two IntegerArrays
+ a = integer_array([1, 2, -3, np.nan])
+ result = ufunc(a, a)
+ expected = integer_array(ufunc(a.astype(float), a.astype(float)))
+ tm.assert_extension_array_equal(result, expected)
+
+ # IntegerArray with numpy array
+ arr = np.array([1, 2, 3, 4])
+ result = ufunc(a, arr)
+ expected = integer_array(ufunc(a.astype(float), arr))
+ tm.assert_extension_array_equal(result, expected)
+
+ result = ufunc(arr, a)
+ expected = integer_array(ufunc(arr, a.astype(float)))
+ tm.assert_extension_array_equal(result, expected)
+
+ # IntegerArray with scalar
+ result = ufunc(a, 1)
+ expected = integer_array(ufunc(a.astype(float), 1))
+ tm.assert_extension_array_equal(result, expected)
+
+ result = ufunc(1, a)
+ expected = integer_array(ufunc(1, a.astype(float)))
+ tm.assert_extension_array_equal(result, expected)
+
+
+@pytest.mark.parametrize('values', [
+ [0, 1], [0, None]
+])
+def test_ufunc_reduce_raises(values):
+ a = integer_array(values)
+ with pytest.raises(NotImplementedError):
+ np.add.reduce(a)
+
+
# TODO(jreback) - these need testing / are broken
# shift
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index 2b1bb53e962be..d097a599730b8 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -84,6 +84,29 @@ def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):
def _from_factorized(cls, values, original):
return cls(values)
+ _HANDLED_TYPES = (decimal.Decimal, numbers.Number, np.ndarray)
+
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ #
+ if not all(isinstance(t, self._HANDLED_TYPES + (DecimalArray,))
+ for t in inputs):
+ return NotImplemented
+
+ inputs = tuple(x._data if isinstance(x, DecimalArray) else x
+ for x in inputs)
+ result = getattr(ufunc, method)(*inputs, **kwargs)
+
+ def reconstruct(x):
+ if isinstance(x, (decimal.Decimal, numbers.Number)):
+ return x
+ else:
+ return DecimalArray._from_sequence(x)
+
+ if isinstance(result, tuple):
+ return tuple(reconstruct(x) for x in result)
+ else:
+ return reconstruct(result)
+
def __getitem__(self, item):
if isinstance(item, numbers.Integral):
return self._data[item]
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 4625c79e1bc3d..80885e4045e64 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -390,6 +390,14 @@ def test_divmod_array(reverse, expected_div, expected_mod):
tm.assert_extension_array_equal(mod, expected_mod)
+def test_ufunc_fallback(data):
+ a = data[:5]
+ s = pd.Series(a, index=range(3, 8))
+ result = np.abs(s)
+ expected = pd.Series(np.abs(a), index=range(3, 8))
+ tm.assert_series_equal(result, expected)
+
+
def test_formatting_values_deprecated():
class DecimalArray2(DecimalArray):
def _formatting_values(self):
@@ -400,3 +408,39 @@ def _formatting_values(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
repr(ser)
+
+
+def test_array_ufunc():
+ a = to_decimal([1, 2, 3])
+ result = np.exp(a)
+ expected = to_decimal(np.exp(a._data))
+ tm.assert_extension_array_equal(result, expected)
+
+
+def test_array_ufunc_series():
+ a = to_decimal([1, 2, 3])
+ s = pd.Series(a)
+ result = np.exp(s)
+ expected = pd.Series(to_decimal(np.exp(a._data)))
+ tm.assert_series_equal(result, expected)
+
+
+def test_array_ufunc_series_scalar_other():
+ # check _HANDLED_TYPES
+ a = to_decimal([1, 2, 3])
+ s = pd.Series(a)
+ result = np.add(s, decimal.Decimal(1))
+ expected = pd.Series(np.add(a, decimal.Decimal(1)))
+ tm.assert_series_equal(result, expected)
+
+
+def test_array_ufunc_series_defer():
+ a = to_decimal([1, 2, 3])
+ s = pd.Series(a)
+
+ expected = pd.Series(to_decimal([2, 4, 6]))
+ r1 = np.add(s, a)
+ r2 = np.add(a, s)
+
+ tm.assert_series_equal(r1, expected)
+ tm.assert_series_equal(r2, expected)
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index aed08b78fe640..df69bb35115cf 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -484,18 +484,18 @@ def test_matmul(self):
b = DataFrame(np.random.randn(3, 4), index=['1', '2', '3'],
columns=['p', 'q', 'r', 's']).T
- # Series @ DataFrame
+ # Series @ DataFrame -> Series
result = operator.matmul(a, b)
expected = Series(np.dot(a.values, b.values), index=['1', '2', '3'])
assert_series_equal(result, expected)
- # DataFrame @ Series
+ # DataFrame @ Series -> Series
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values),
index=['1', '2', '3'])
assert_series_equal(result, expected)
- # Series @ Series
+ # Series @ Series -> scalar
result = operator.matmul(a, a)
expected = np.dot(a.values, a.values)
assert_almost_equal(result, expected)
diff --git a/pandas/tests/series/test_ufunc.py b/pandas/tests/series/test_ufunc.py
index 05d19452b1eac..1a0eeb51c4921 100644
--- a/pandas/tests/series/test_ufunc.py
+++ b/pandas/tests/series/test_ufunc.py
@@ -1,3 +1,4 @@
+from collections import deque
import string
import numpy as np
@@ -12,14 +13,12 @@
np.logaddexp,
]
SPARSE = [
- pytest.param(True,
- marks=pytest.mark.xfail(reason="Series.__array_ufunc__")),
- False,
+ True,
+ False
]
SPARSE_IDS = ['sparse', 'dense']
SHUFFLE = [
- pytest.param(True, marks=pytest.mark.xfail(reason="GH-26945",
- strict=False)),
+ True,
False
]
@@ -43,7 +42,7 @@ def test_unary_ufunc(ufunc, sparse):
array = np.random.randint(0, 10, 10, dtype='int64')
array[::2] = 0
if sparse:
- array = pd.SparseArray(array, dtype=pd.SparseDtype('int', 0))
+ array = pd.SparseArray(array, dtype=pd.SparseDtype('int64', 0))
index = list(string.ascii_letters[:10])
name = "name"
@@ -61,8 +60,8 @@ def test_binary_ufunc_with_array(flip, sparse, ufunc, arrays_for_binary_ufunc):
# Test that ufunc(Series(a), array) == Series(ufunc(a, b))
a1, a2 = arrays_for_binary_ufunc
if sparse:
- a1 = pd.SparseArray(a1, dtype=pd.SparseDtype('int', 0))
- a2 = pd.SparseArray(a2, dtype=pd.SparseDtype('int', 0))
+ a1 = pd.SparseArray(a1, dtype=pd.SparseDtype('int64', 0))
+ a2 = pd.SparseArray(a2, dtype=pd.SparseDtype('int64', 0))
name = "name" # op(Series, array) preserves the name.
series = pd.Series(a1, name=name)
@@ -82,18 +81,15 @@ def test_binary_ufunc_with_array(flip, sparse, ufunc, arrays_for_binary_ufunc):
@pytest.mark.parametrize("ufunc", BINARY_UFUNCS)
@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
-@pytest.mark.parametrize("flip", [
- pytest.param(True, marks=pytest.mark.xfail(reason="Index should defer")),
- False
-], ids=['flipped', 'straight'])
+@pytest.mark.parametrize("flip", [True, False], ids=['flipped', 'straight'])
def test_binary_ufunc_with_index(flip, sparse, ufunc, arrays_for_binary_ufunc):
# Test that
# * func(Series(a), Series(b)) == Series(ufunc(a, b))
# * ufunc(Index, Series) dispatches to Series (returns a Series)
a1, a2 = arrays_for_binary_ufunc
if sparse:
- a1 = pd.SparseArray(a1, dtype=pd.SparseDtype('int', 0))
- a2 = pd.SparseArray(a2, dtype=pd.SparseDtype('int', 0))
+ a1 = pd.SparseArray(a1, dtype=pd.SparseDtype('int64', 0))
+ a2 = pd.SparseArray(a2, dtype=pd.SparseDtype('int64', 0))
name = "name" # op(Series, array) preserves the name.
series = pd.Series(a1, name=name)
@@ -121,14 +117,10 @@ def test_binary_ufunc_with_series(flip, shuffle, sparse, ufunc,
# Test that
# * func(Series(a), Series(b)) == Series(ufunc(a, b))
# with alignment between the indices
-
- if flip and shuffle:
- pytest.xfail(reason="Fix with Series.__array_ufunc__")
-
a1, a2 = arrays_for_binary_ufunc
if sparse:
- a1 = pd.SparseArray(a1, dtype=pd.SparseDtype('int', 0))
- a2 = pd.SparseArray(a2, dtype=pd.SparseDtype('int', 0))
+ a1 = pd.SparseArray(a1, dtype=pd.SparseDtype('int64', 0))
+ a2 = pd.SparseArray(a2, dtype=pd.SparseDtype('int64', 0))
name = "name" # op(Series, array) preserves the name.
series = pd.Series(a1, name=name)
@@ -138,8 +130,6 @@ def test_binary_ufunc_with_series(flip, shuffle, sparse, ufunc,
if shuffle:
other = other.take(idx)
- a2 = a2.take(idx)
- # alignment, so the expected index is the first index in the op.
if flip:
index = other.align(series)[0].index
else:
@@ -198,10 +188,13 @@ def test_multiple_ouput_binary_ufuncs(ufunc, sparse, shuffle,
pytest.skip("sparse divmod not implemented.")
a1, a2 = arrays_for_binary_ufunc
+ # work around https://github.com/pandas-dev/pandas/issues/26987
+ a1[a1 == 0] = 1
+ a2[a2 == 0] = 1
if sparse:
- a1 = pd.SparseArray(a1, dtype=pd.SparseDtype('int', 0))
- a2 = pd.SparseArray(a2, dtype=pd.SparseDtype('int', 0))
+ a1 = pd.SparseArray(a1, dtype=pd.SparseDtype('int64', 0))
+ a2 = pd.SparseArray(a2, dtype=pd.SparseDtype('int64', 0))
s1 = pd.Series(a1)
s2 = pd.Series(a2)
@@ -241,7 +234,6 @@ def test_multiple_ouput_ufunc(sparse, arrays_for_binary_ufunc):
@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
@pytest.mark.parametrize("ufunc", BINARY_UFUNCS)
-@pytest.mark.xfail(reason="Series.__array_ufunc__")
def test_binary_ufunc_drops_series_name(ufunc, sparse,
arrays_for_binary_ufunc):
# Drop the names when they differ.
@@ -251,3 +243,70 @@ def test_binary_ufunc_drops_series_name(ufunc, sparse,
result = ufunc(s1, s2)
assert result.name is None
+
+
+def test_object_series_ok():
+ class Dummy:
+ def __init__(self, value):
+ self.value = value
+
+ def __add__(self, other):
+ return self.value + other.value
+
+ arr = np.array([Dummy(0), Dummy(1)])
+ ser = pd.Series(arr)
+ tm.assert_series_equal(np.add(ser, ser), pd.Series(np.add(ser, arr)))
+ tm.assert_series_equal(np.add(ser, Dummy(1)),
+ pd.Series(np.add(ser, Dummy(1))))
+
+
+@pytest.mark.parametrize('values', [
+ pd.array([1, 3, 2]),
+ pytest.param(
+ pd.array([1, 10, 0], dtype='Sparse[int]'),
+ marks=pytest.mark.xfail(reason='GH-27080. Bug in SparseArray')
+ ),
+ pd.to_datetime(['2000', '2010', '2001']),
+ pd.to_datetime(['2000', '2010', '2001']).tz_localize("CET"),
+ pd.to_datetime(['2000', '2010', '2001']).to_period(freq="D"),
+
+])
+def test_reduce(values):
+ a = pd.Series(values)
+ assert np.maximum.reduce(a) == values[1]
+
+
+@pytest.mark.parametrize('type_', [
+ list,
+ deque,
+ tuple,
+])
+def test_binary_ufunc_other_types(type_):
+ a = pd.Series([1, 2, 3], name='name')
+ b = type_([3, 4, 5])
+
+ result = np.add(a, b)
+ expected = pd.Series(np.add(a.to_numpy(), b), name='name')
+ tm.assert_series_equal(result, expected)
+
+
+def test_object_dtype_ok():
+
+ class Thing:
+ def __init__(self, value):
+ self.value = value
+
+ def __add__(self, other):
+ other = getattr(other, 'value', other)
+ return type(self)(self.value + other)
+
+ def __eq__(self, other):
+ return type(other) is Thing and self.value == other.value
+
+ def __repr__(self):
+ return 'Thing({})'.format(self.value)
+
+ s = pd.Series([Thing(1), Thing(2)])
+ result = np.add(s, Thing(1))
+ expected = pd.Series([Thing(2), Thing(3)])
+ tm.assert_series_equal(result, expected)
| This PR:
* adds a basic (but incomplete) `__array_ufunc__` implementation for `IntegerArray` (to be able to check it is correctly used from Series for ExtensionArrays)
* adds `Series.__array_ufunc__` (not yet for `Index`)
---
@TomAugspurger revived my branch, will try to work on it a bit further this afternoon
What this already does is a basic implementation of the protocol for IntegerArray for simple ufuncs (call) for all IntegerArrays, and Series dispatching to the underlying values.
One question is if we want to force EA authors to implement an `__array_ufunc__` (eg by having a default implementation returning NotImplemented). | https://api.github.com/repos/pandas-dev/pandas/pulls/23293 | 2018-10-23T09:05:32Z | 2019-07-01T21:20:05Z | 2019-07-01T21:20:05Z | 2019-07-02T14:12:14Z |
BUG: Let MultiIndex.set_levels accept any iterable (#23273) | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index d0aa156cf5059..de36e1d0794b4 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -1004,6 +1004,7 @@ Indexing
- Bug in :meth:`DataFrame.loc` when indexing with an :class:`IntervalIndex` (:issue:`19977`)
- :class:`Index` no longer mangles ``None``, ``NaN`` and ``NaT``, i.e. they are treated as three different keys. However, for numeric Index all three are still coerced to a ``NaN`` (:issue:`22332`)
- Bug in `scalar in Index` if scalar is a float while the ``Index`` is of integer dtype (:issue:`22085`)
+- Bug in `MultiIndex.set_levels` when levels value is not subscriptable (:issue:`23273`)
Missing
^^^^^^^
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 3cccb65503378..79ac32d2f6a0b 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -389,6 +389,9 @@ def set_levels(self, levels, level=None, inplace=False,
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
"""
+ if is_list_like(levels) and not isinstance(levels, Index):
+ levels = list(levels)
+
if level is not None and not is_list_like(level):
if not is_list_like(levels):
raise TypeError("Levels must be list-like")
diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py
index 99ab54a83636c..ff2170839b012 100644
--- a/pandas/tests/indexes/multi/test_get_set.py
+++ b/pandas/tests/indexes/multi/test_get_set.py
@@ -414,3 +414,17 @@ def test_set_value_keeps_names():
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
+
+
+def test_set_levels_with_iterable():
+ # GH23273
+ sizes = [1, 2, 3]
+ colors = ['black'] * 3
+ index = pd.MultiIndex.from_arrays([sizes, colors], names=['size', 'color'])
+
+ result = index.set_levels(map(int, ['3', '2', '1']), level='size')
+
+ expected_sizes = [3, 2, 1]
+ expected = pd.MultiIndex.from_arrays([expected_sizes, colors],
+ names=['size', 'color'])
+ tm.assert_index_equal(result, expected)
| - [x] closes #23273
- [x] tests added and passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Quite surprisingly, `is_list_like` accepts a wide range of iterables, including non-subscriptable objects. Later in the code, `set_levels` implicitly assumes that the passed instance can return its 0th element by index, which is not always the case, as shown in the example in #23273.
To address the issue, if the passed levels are list-like but not an `Index`, they are explicitly converted to a list with `list(levels)`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/23291 | 2018-10-23T08:35:35Z | 2018-10-25T11:33:42Z | 2018-10-25T11:33:42Z | 2018-10-25T20:09:20Z |
Revert "implement TimedeltaArray asm8, to_timedelta64 (#23205)" | diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index b0b1397086e02..ac90483513af5 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -707,22 +707,6 @@ def tz_localize(self, tz, ambiguous='raise', errors='raise'):
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timestamp methods
- def to_datetime64(self):
- """
- Return numpy datetime64[ns] representation of self. For timezone-aware
- cases, the returned array represents UTC timestamps.
-
- Returns
- -------
- ndarray[datetime64[ns]]
- """
- return self.asi8.view('M8[ns]')
-
- @property
- def asm8(self):
- """Vectorized analogue of Timestamp.asm8"""
- return self.to_datetime64()
-
def to_pydatetime(self):
"""
Return Datetime Array/Index as object ndarray of datetime.datetime
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 522766804d4a1..eb7dabdc03b0b 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -352,31 +352,6 @@ def to_pytimedelta(self):
"""
return tslibs.ints_to_pytimedelta(self.asi8)
- def to_timedelta64(self):
- """
- Return numpy array with timedelta64[ns] dtype
-
- Returns
- -------
- ndarray[timedelta64[ns]]
-
- Notes
- -----
- This returns a view on self, not a copy.
-
- See also
- --------
- Timedelta.to_timedelta64
- """
- return self.asi8.view('m8[ns]')
-
- @property
- def asm8(self):
- """
- Vectorized analogue of Timedelta.asm8
- """
- return self.to_timedelta64()
-
days = _field_accessor("days", "days",
" Number of days for each element. ")
seconds = _field_accessor("seconds", "seconds",
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index eeb173dfa9bb4..d0099aed00285 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -106,30 +106,6 @@ def test_to_period(self, datetime_index, freqstr):
# an EA-specific tm.assert_ function
tm.assert_index_equal(pd.Index(result), pd.Index(expected))
- def test_asm8(self, datetime_index):
- dti = datetime_index
- arr = DatetimeArrayMixin(dti)
-
- expected = np.array([x.asm8 for x in dti], dtype='M8[ns]')
-
- result = dti.asm8
- tm.assert_numpy_array_equal(result, expected)
-
- result = arr.asm8
- tm.assert_numpy_array_equal(result, expected)
-
- def test_to_datetime64(self, datetime_index):
- dti = datetime_index
- arr = DatetimeArrayMixin(dti)
-
- expected = np.array([x.asm8 for x in dti], dtype='M8[ns]')
-
- result = dti.to_datetime64()
- tm.assert_numpy_array_equal(result, expected)
-
- result = arr.to_datetime64()
- tm.assert_numpy_array_equal(result, expected)
-
@pytest.mark.parametrize('propname', pd.DatetimeIndex._bool_ops)
def test_bool_properties(self, datetime_index, propname):
# in this case _bool_ops is just `is_leap_year`
@@ -172,30 +148,6 @@ def test_astype_object(self):
assert asobj.dtype == 'O'
assert list(asobj) == list(tdi)
- def test_asm8(self):
- tdi = pd.TimedeltaIndex(['1 Hour', '3 Hours'])
- arr = TimedeltaArrayMixin(tdi)
-
- expected = np.array([3600, 10800], dtype='m8[ns]') * 1e9
-
- result = tdi.asm8
- tm.assert_numpy_array_equal(result, expected)
-
- result = arr.asm8
- tm.assert_numpy_array_equal(result, expected)
-
- def test_to_timedelta64(self):
- tdi = pd.TimedeltaIndex(['1 Hour', '3 Hours'])
- arr = TimedeltaArrayMixin(tdi)
-
- expected = np.array([3600, 10800], dtype='m8[ns]') * 1e9
-
- result = tdi.to_timedelta64()
- tm.assert_numpy_array_equal(result, expected)
-
- result = arr.to_timedelta64()
- tm.assert_numpy_array_equal(result, expected)
-
def test_to_pytimedelta(self, timedelta_index):
tdi = timedelta_index
arr = TimedeltaArrayMixin(tdi)
| This reverts commit 6e507133916fc5a8434e21eb267043fc9a3ba8d7 (https://github.com/pandas-dev/pandas/pull/23205) | https://api.github.com/repos/pandas-dev/pandas/pulls/23290 | 2018-10-23T06:04:02Z | 2018-10-23T06:04:20Z | 2018-10-23T06:04:20Z | 2018-10-23T06:04:30Z |
BUG GH23282 calling min on series of NaT returns NaT | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 768868d585721..31952ea19a1ba 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -1020,6 +1020,7 @@ Datetimelike
- Bug in :func:`to_datetime` with an :class:`Index` argument that would drop the ``name`` from the result (:issue:`21697`)
- Bug in :class:`PeriodIndex` where adding or subtracting a :class:`timedelta` or :class:`Tick` object produced incorrect results (:issue:`22988`)
- Bug in :func:`date_range` when decrementing a start date to a past end date by a negative frequency (:issue:`23270`)
+- Bug in :meth:`Series.min` which would return ``NaN`` instead of ``NaT`` when called on a series of ``NaT`` (:issue:`23282`)
- Bug in :func:`DataFrame.combine` with datetimelike values raising a TypeError (:issue:`23079`)
Timedelta
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 2884bc1a19491..afba433f0e391 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -244,7 +244,7 @@ def _get_values(values, skipna, fill_value=None, fill_value_typ=None,
elif is_float_dtype(dtype):
dtype_max = np.float64
- return values, mask, dtype, dtype_max
+ return values, mask, dtype, dtype_max, fill_value
def _isfinite(values):
@@ -266,16 +266,21 @@ def _view_if_needed(values):
return values
-def _wrap_results(result, dtype):
+def _wrap_results(result, dtype, fill_value=None):
""" wrap our results if needed """
if is_datetime64_dtype(dtype):
if not isinstance(result, np.ndarray):
+ assert not isna(fill_value), "Expected non-null fill_value"
+ if result == fill_value:
+ result = np.nan
result = tslibs.Timestamp(result)
else:
result = result.view(dtype)
elif is_timedelta64_dtype(dtype):
if not isinstance(result, np.ndarray):
+ if result == fill_value:
+ result = np.nan
# raise if we have a timedelta64[ns] which is too large
if np.fabs(result) > _int64_max:
@@ -346,8 +351,8 @@ def nanany(values, axis=None, skipna=True, mask=None):
>>> nanops.nanany(s)
False
"""
- values, mask, dtype, _ = _get_values(values, skipna, False, copy=skipna,
- mask=mask)
+ values, mask, dtype, _, _ = _get_values(values, skipna, False, copy=skipna,
+ mask=mask)
return values.any(axis)
@@ -379,8 +384,8 @@ def nanall(values, axis=None, skipna=True, mask=None):
>>> nanops.nanall(s)
False
"""
- values, mask, dtype, _ = _get_values(values, skipna, True, copy=skipna,
- mask=mask)
+ values, mask, dtype, _, _ = _get_values(values, skipna, True, copy=skipna,
+ mask=mask)
return values.all(axis)
@@ -409,7 +414,8 @@ def nansum(values, axis=None, skipna=True, min_count=0, mask=None):
>>> nanops.nansum(s)
3.0
"""
- values, mask, dtype, dtype_max = _get_values(values, skipna, 0, mask=mask)
+ values, mask, dtype, dtype_max, _ = _get_values(values,
+ skipna, 0, mask=mask)
dtype_sum = dtype_max
if is_float_dtype(dtype):
dtype_sum = dtype
@@ -448,7 +454,8 @@ def nanmean(values, axis=None, skipna=True, mask=None):
>>> nanops.nanmean(s)
1.5
"""
- values, mask, dtype, dtype_max = _get_values(values, skipna, 0, mask=mask)
+ values, mask, dtype, dtype_max, _ = _get_values(
+ values, skipna, 0, mask=mask)
dtype_sum = dtype_max
dtype_count = np.float64
if is_integer_dtype(dtype) or is_timedelta64_dtype(dtype):
@@ -501,7 +508,7 @@ def get_median(x):
return np.nan
return np.nanmedian(x[mask])
- values, mask, dtype, dtype_max = _get_values(values, skipna, mask=mask)
+ values, mask, dtype, dtype_max, _ = _get_values(values, skipna, mask=mask)
if not is_float_dtype(values):
values = values.astype('f8')
values[mask] = np.nan
@@ -705,7 +712,8 @@ def nansem(values, axis=None, skipna=True, ddof=1, mask=None):
def _nanminmax(meth, fill_value_typ):
@bottleneck_switch()
def reduction(values, axis=None, skipna=True, mask=None):
- values, mask, dtype, dtype_max = _get_values(
+
+ values, mask, dtype, dtype_max, fill_value = _get_values(
values, skipna, fill_value_typ=fill_value_typ, mask=mask)
if ((axis is not None and values.shape[axis] == 0) or
@@ -719,7 +727,7 @@ def reduction(values, axis=None, skipna=True, mask=None):
else:
result = getattr(values, meth)(axis)
- result = _wrap_results(result, dtype)
+ result = _wrap_results(result, dtype, fill_value)
return _maybe_null_out(result, axis, mask)
reduction.__name__ = 'nan' + meth
@@ -753,8 +761,8 @@ def nanargmax(values, axis=None, skipna=True, mask=None):
>>> nanops.nanargmax(s)
4
"""
- values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='-inf',
- mask=mask)
+ values, mask, dtype, _, _ = _get_values(
+ values, skipna, fill_value_typ='-inf', mask=mask)
result = values.argmax(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
@@ -783,8 +791,8 @@ def nanargmin(values, axis=None, skipna=True, mask=None):
>>> nanops.nanargmin(s)
0
"""
- values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='+inf',
- mask=mask)
+ values, mask, dtype, _, _ = _get_values(
+ values, skipna, fill_value_typ='+inf', mask=mask)
result = values.argmin(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py
index 7f8bd375cb1a4..d5ceac056b195 100644
--- a/pandas/tests/series/test_datetime_values.py
+++ b/pandas/tests/series/test_datetime_values.py
@@ -510,3 +510,21 @@ def test_dt_timetz_accessor(self, tz_naive_fixture):
time(22, 14, tzinfo=tz)])
result = s.dt.timetz
tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize('nat', [
+ pd.Series([pd.NaT, pd.NaT]),
+ pd.Series([pd.NaT, pd.Timedelta('nat')]),
+ pd.Series([pd.Timedelta('nat'), pd.Timedelta('nat')])])
+ def test_minmax_nat_series(self, nat):
+ # GH 23282
+ assert nat.min() is pd.NaT
+ assert nat.max() is pd.NaT
+
+ @pytest.mark.parametrize('nat', [
+ # GH 23282
+ pd.DataFrame([pd.NaT, pd.NaT]),
+ pd.DataFrame([pd.NaT, pd.Timedelta('nat')]),
+ pd.DataFrame([pd.Timedelta('nat'), pd.Timedelta('nat')])])
+ def test_minmax_nat_dataframe(self, nat):
+ assert nat.min()[0] is pd.NaT
+ assert nat.max()[0] is pd.NaT
| - [X] closes #23282
- [X] tests passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
For `max`, `NaT` values are filled with the lowest possible value. For `min`, they are filled with the highest possible value. The problem is that only the lowest possible value is recognized as `NaT`. Since `nanops.py` is responsible for assigning the highest value to `NaT` when min is called, it should also be responsible for translating it to `NaT` when appropriate. | https://api.github.com/repos/pandas-dev/pandas/pulls/23289 | 2018-10-23T03:38:54Z | 2018-10-28T22:31:14Z | 2018-10-28T22:31:14Z | 2018-10-28T22:31:19Z |
CLN: simplify try_coerce_args | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index f89d4c51c9a6f..5ce8a9103f008 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -418,7 +418,7 @@ def fillna(self, value, limit=None, inplace=False, downcast=None):
# fillna, but if we cannot coerce, then try again as an ObjectBlock
try:
- values, _, _, _ = self._try_coerce_args(self.values, value)
+ values, _ = self._try_coerce_args(self.values, value)
blocks = self.putmask(mask, value, inplace=inplace)
blocks = [b.make_block(values=self._try_coerce_result(b.values))
for b in blocks]
@@ -745,7 +745,7 @@ def _try_coerce_args(self, values, other):
type(other).__name__,
type(self).__name__.lower().replace('Block', '')))
- return values, False, other, False
+ return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
@@ -795,8 +795,8 @@ def replace(self, to_replace, value, inplace=False, filter=None,
# try to replace, if we raise an error, convert to ObjectBlock and
# retry
try:
- values, _, to_replace, _ = self._try_coerce_args(self.values,
- to_replace)
+ values, to_replace = self._try_coerce_args(self.values,
+ to_replace)
mask = missing.mask_missing(values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
@@ -853,7 +853,7 @@ def setitem(self, indexer, value):
# coerce if block dtype can store value
values = self.values
try:
- values, _, value, _ = self._try_coerce_args(values, value)
+ values, value = self._try_coerce_args(values, value)
# can keep its own dtype
if hasattr(value, 'dtype') and is_dtype_equal(values.dtype,
value.dtype):
@@ -985,7 +985,7 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0,
new = self.fill_value
if self._can_hold_element(new):
- _, _, new, _ = self._try_coerce_args(new_values, new)
+ _, new = self._try_coerce_args(new_values, new)
if transpose:
new_values = new_values.T
@@ -1193,7 +1193,7 @@ def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
return [self.copy()]
values = self.values if inplace else self.values.copy()
- values, _, fill_value, _ = self._try_coerce_args(values, fill_value)
+ values, fill_value = self._try_coerce_args(values, fill_value)
values = missing.interpolate_2d(values, method=method, axis=axis,
limit=limit, fill_value=fill_value,
dtype=self.dtype)
@@ -1366,8 +1366,7 @@ def func(cond, values, other):
if cond.ravel().all():
return values
- values, values_mask, other, other_mask = self._try_coerce_args(
- values, other)
+ values, other = self._try_coerce_args(values, other)
try:
return self._try_coerce_result(expressions.where(
@@ -1477,7 +1476,7 @@ def quantile(self, qs, interpolation='linear', axis=0, axes=None):
"""
kw = {'interpolation': interpolation}
values = self.get_values()
- values, _, _, _ = self._try_coerce_args(values, values)
+ values, _ = self._try_coerce_args(values, values)
def _nanpercentile1D(values, mask, q, **kw):
# mask is Union[ExtensionArray, ndarray]
@@ -1714,7 +1713,7 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0,
# use block's copy logic.
# .values may be an Index which does shallow copy by default
new_values = self.values if inplace else self.copy().values
- new_values, _, new, _ = self._try_coerce_args(new_values, new)
+ new_values, new = self._try_coerce_args(new_values, new)
if isinstance(new, np.ndarray) and len(new) == len(mask):
new = new[mask]
@@ -2129,35 +2128,28 @@ def _try_coerce_args(self, values, other):
Returns
-------
- base-type values, values mask, base-type other, other mask
+ base-type values, base-type other
"""
-
- values_mask = isna(values)
values = values.view('i8')
- other_mask = False
if isinstance(other, bool):
raise TypeError
elif is_null_datelike_scalar(other):
other = tslibs.iNaT
- other_mask = True
elif isinstance(other, Timedelta):
- other_mask = isna(other)
other = other.value
elif isinstance(other, timedelta):
other = Timedelta(other).value
elif isinstance(other, np.timedelta64):
- other_mask = isna(other)
other = Timedelta(other).value
elif hasattr(other, 'dtype') and is_timedelta64_dtype(other):
- other_mask = isna(other)
other = other.astype('i8', copy=False).view('i8')
else:
# coercion issues
# let higher levels handle
raise TypeError
- return values, values_mask, other, other_mask
+ return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
@@ -2343,7 +2335,7 @@ def _try_coerce_args(self, values, other):
# to store DatetimeTZBlock as object
other = other.astype(object).values
- return values, False, other, False
+ return values, other
def should_store(self, value):
return not (issubclass(value.dtype.type,
@@ -2682,33 +2674,29 @@ def _try_coerce_args(self, values, other):
Returns
-------
- base-type values, values mask, base-type other, other mask
+ base-type values, base-type other
"""
- values_mask = isna(values)
values = values.view('i8')
if isinstance(other, bool):
raise TypeError
elif is_null_datelike_scalar(other):
other = tslibs.iNaT
- other_mask = True
elif isinstance(other, (datetime, np.datetime64, date)):
other = self._box_func(other)
if getattr(other, 'tz') is not None:
raise TypeError("cannot coerce a Timestamp with a tz on a "
"naive Block")
- other_mask = isna(other)
other = other.asm8.view('i8')
elif hasattr(other, 'dtype') and is_datetime64_dtype(other):
- other_mask = isna(other)
other = other.astype('i8', copy=False).view('i8')
else:
# coercion issues
# let higher levels handle
raise TypeError
- return values, values_mask, other, other_mask
+ return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
@@ -2855,9 +2843,8 @@ def _try_coerce_args(self, values, other):
Returns
-------
- base-type values, values mask, base-type other, other mask
+ base-type values, base-type other
"""
- values_mask = _block_shape(isna(values), ndim=self.ndim)
# asi8 is a view, needs copy
values = _block_shape(values.asi8, ndim=self.ndim)
@@ -2869,11 +2856,9 @@ def _try_coerce_args(self, values, other):
elif (is_null_datelike_scalar(other) or
(lib.is_scalar(other) and isna(other))):
other = tslibs.iNaT
- other_mask = True
elif isinstance(other, self._holder):
if other.tz != self.values.tz:
raise ValueError("incompatible or non tz-aware value")
- other_mask = _block_shape(isna(other), ndim=self.ndim)
other = _block_shape(other.asi8, ndim=self.ndim)
elif isinstance(other, (np.datetime64, datetime, date)):
other = tslibs.Timestamp(other)
@@ -2882,12 +2867,11 @@ def _try_coerce_args(self, values, other):
# test we can have an equal time zone
if tz is None or str(tz) != str(self.values.tz):
raise ValueError("incompatible or non tz-aware value")
- other_mask = isna(other)
other = other.value
else:
raise TypeError
- return values, values_mask, other, other_mask
+ return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index b6a83b786bab2..cdf35ea96588a 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -300,14 +300,14 @@ def test_try_coerce_arg(self):
block = create_block('datetime', [0])
# coerce None
- none_coerced = block._try_coerce_args(block.values, None)[2]
+ none_coerced = block._try_coerce_args(block.values, None)[1]
assert pd.Timestamp(none_coerced) is pd.NaT
# coerce different types of date bojects
vals = (np.datetime64('2010-10-10'), datetime(2010, 10, 10),
date(2010, 10, 10))
for val in vals:
- coerced = block._try_coerce_args(block.values, val)[2]
+ coerced = block._try_coerce_args(block.values, val)[1]
assert np.int64 == type(coerced)
assert pd.Timestamp('2010-10-10') == pd.Timestamp(coerced)
| Small simplication available following #23132. | https://api.github.com/repos/pandas-dev/pandas/pulls/23288 | 2018-10-23T03:22:49Z | 2018-10-23T21:42:26Z | 2018-10-23T21:42:26Z | 2018-10-23T21:46:28Z |
Preserve EA dtype in DataFrame.stack | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index fd7d88bd52383..0c2a176869829 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -853,6 +853,7 @@ update the ``ExtensionDtype._metadata`` tuple to match the signature of your
- Updated the ``.type`` attribute for ``PeriodDtype``, ``DatetimeTZDtype``, and ``IntervalDtype`` to be instances of the dtype (``Period``, ``Timestamp``, and ``Interval`` respectively) (:issue:`22938`)
- :func:`ExtensionArray.isna` is allowed to return an ``ExtensionArray`` (:issue:`22325`).
- Support for reduction operations such as ``sum``, ``mean`` via opt-in base class method override (:issue:`22762`)
+- :meth:`DataFrame.stack` no longer converts to object dtype for DataFrames where each column has the same extension dtype. The output Series will have the same dtype as the columns (:issue:`23077`).
- :meth:`Series.unstack` and :meth:`DataFrame.unstack` no longer convert extension arrays to object-dtype ndarrays. Each column in the output ``DataFrame`` will now have the same dtype as the input (:issue:`23077`).
- Bug when grouping :meth:`Dataframe.groupby()` and aggregating on ``ExtensionArray`` it was not returning the actual ``ExtensionArray`` dtype (:issue:`23227`).
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 7a55b652054ed..1f2a1ee52159e 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -35,9 +35,9 @@
is_numeric_v_string_like, is_extension_type,
is_extension_array_dtype,
is_list_like,
- is_sparse,
is_re,
is_re_compilable,
+ is_sparse,
pandas_dtype)
from pandas.core.dtypes.cast import (
maybe_downcast_to_dtype,
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 2dca7cf0e6aa3..065728fb239ae 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -494,8 +494,9 @@ def factorize(index):
if is_extension_array_dtype(dtype):
arr = dtype.construct_array_type()
new_values = arr._concat_same_type([
- col for _, col in frame.iteritems()
+ col._values for _, col in frame.iteritems()
])
+ new_values = _reorder_for_extension_array_stack(new_values, N, K)
else:
# homogeneous, non-EA
new_values = frame.values.ravel()
@@ -624,16 +625,32 @@ def _convert_level_number(level_num, columns):
slice_len = loc.stop - loc.start
if slice_len != levsize:
- chunk = this.loc[:, this.columns[loc]]
+ chunk = this[this.columns[loc]]
chunk.columns = level_vals.take(chunk.columns.labels[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
- if frame._is_mixed_type:
- value_slice = this.loc[:, this.columns[loc]].values
+ if (frame._is_homogeneous_type and
+ is_extension_array_dtype(frame.dtypes.iloc[0])):
+ dtype = this[this.columns[loc]].dtypes.iloc[0]
+ subset = this[this.columns[loc]]
+
+ value_slice = dtype.construct_array_type()._concat_same_type(
+ [x._values for _, x in subset.iteritems()]
+ )
+ N, K = this.shape
+ idx = np.arange(N * K).reshape(K, N).T.ravel()
+ value_slice = value_slice.take(idx)
+
+ elif frame._is_mixed_type:
+ value_slice = this[this.columns[loc]].values
else:
value_slice = this.values[:, loc]
- new_data[key] = value_slice.ravel()
+ if value_slice.ndim > 1:
+ # i.e. not extension
+ value_slice = value_slice.ravel()
+
+ new_data[key] = value_slice
if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
@@ -971,3 +988,38 @@ def make_axis_dummies(frame, axis='minor', transform=None):
values = values.take(labels, axis=0)
return DataFrame(values, columns=items, index=frame.index)
+
+
+def _reorder_for_extension_array_stack(arr, n_rows, n_columns):
+ """
+ Re-orders the values when stacking multiple extension-arrays.
+
+ The indirect stacking method used for EAs requires a followup
+ take to get the order correct.
+
+ Parameters
+ ----------
+ arr : ExtensionArray
+ n_rows, n_columns : int
+ The number of rows and columns in the original DataFrame.
+
+ Returns
+ -------
+ taken : ExtensionArray
+ The original `arr` with elements re-ordered appropriately
+
+ Examples
+ --------
+ >>> arr = np.array(['a', 'b', 'c', 'd', 'e', 'f'])
+ >>> _reorder_for_extension_array_stack(arr, 2, 3)
+ array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='<U1')
+
+ >>> _reorder_for_extension_array_stack(arr, 3, 2)
+ array(['a', 'd', 'b', 'e', 'c', 'f'], dtype='<U1')
+ """
+ # final take to get the order correct.
+ # idx is an indexer like
+ # [c0r0, c1r0, c2r0, ...,
+ # c0r1, c1r1, c2r1, ...]
+ idx = np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.ravel()
+ return arr.take(idx)
diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py
index d0e42e69e300f..9904fcd362818 100644
--- a/pandas/tests/extension/base/reshaping.py
+++ b/pandas/tests/extension/base/reshaping.py
@@ -173,6 +173,28 @@ def test_merge(self, data, na_value):
dtype=data.dtype)})
self.assert_frame_equal(res, exp[['ext', 'int1', 'key', 'int2']])
+ @pytest.mark.parametrize("columns", [
+ ["A", "B"],
+ pd.MultiIndex.from_tuples([('A', 'a'), ('A', 'b')],
+ names=['outer', 'inner']),
+ ])
+ def test_stack(self, data, columns):
+ df = pd.DataFrame({"A": data[:5], "B": data[:5]})
+ df.columns = columns
+ result = df.stack()
+ expected = df.astype(object).stack()
+ # we need a second astype(object), in case the constructor inferred
+ # object -> specialized, as is done for period.
+ expected = expected.astype(object)
+
+ if isinstance(expected, pd.Series):
+ assert result.dtype == df.iloc[:, 0].dtype
+ else:
+ assert all(result.dtypes == df.iloc[:, 0].dtype)
+
+ result = result.astype(object)
+ self.assert_equal(result, expected)
+
@pytest.mark.parametrize("index", [
# Two levels, uniform.
pd.MultiIndex.from_product(([['A', 'B'], ['a', 'b']]),
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 2b1bfecdf8f28..b7c61496f0bf0 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -139,6 +139,15 @@ def test_from_dtype(self, data):
class TestReshaping(BaseJSON, base.BaseReshapingTests):
+
+ @pytest.mark.skip(reason="Different definitions of NA")
+ def test_stack(self):
+ """
+ The test does .astype(object).stack(). If we happen to have
+ any missing values in `data`, then we'll end up with different
+ rows since we consider `{}` NA, but `.astype(object)` doesn't.
+ """
+
@pytest.mark.xfail(reason="dict for NA", strict=True)
def test_unstack(self, data, index):
# The base test has NaN for the expected NA value.
diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py
index 54511df4effad..ab3d6ca3b19f7 100644
--- a/pandas/tests/frame/test_reshape.py
+++ b/pandas/tests/frame/test_reshape.py
@@ -874,6 +874,17 @@ def test_stack_preserve_categorical_dtype(self, ordered, labels):
tm.assert_series_equal(result, expected)
+ def test_stack_preserve_categorical_dtype_values(self):
+ # GH-23077
+ cat = pd.Categorical(['a', 'a', 'b', 'c'])
+ df = pd.DataFrame({"A": cat, "B": cat})
+ result = df.stack()
+ index = pd.MultiIndex.from_product([[0, 1, 2, 3], ['A', 'B']])
+ expected = pd.Series(pd.Categorical(['a', 'a', 'a', 'a',
+ 'b', 'b', 'c', 'c']),
+ index=index)
+ tm.assert_series_equal(result, expected)
+
@pytest.mark.parametrize('level', [0, 1])
def test_unstack_mixed_extension_types(self, level):
index = pd.MultiIndex.from_tuples([('A', 0), ('A', 1), ('B', 1)],
diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py
index 03143488c3874..10074a2e5ad99 100644
--- a/pandas/tests/sparse/frame/test_frame.py
+++ b/pandas/tests/sparse/frame/test_frame.py
@@ -736,6 +736,16 @@ def test_astype_bool(self):
assert res['A'].dtype == SparseDtype(np.bool)
assert res['B'].dtype == SparseDtype(np.bool)
+ def test_astype_object(self):
+ # This may change in GH-23125
+ df = pd.DataFrame({"A": SparseArray([0, 1]),
+ "B": SparseArray([0, 1])})
+ result = df.astype(object)
+ dtype = SparseDtype(object, 0)
+ expected = pd.DataFrame({"A": SparseArray([0, 1], dtype=dtype),
+ "B": SparseArray([0, 1], dtype=dtype)})
+ tm.assert_frame_equal(result, expected)
+
def test_fillna(self, float_frame_fill0, float_frame_fill0_dense):
df = float_frame_fill0.reindex(lrange(5))
dense = float_frame_fill0_dense.reindex(lrange(5))
| closes #23077
There were two bugs in master (not present in 0.23.4), probably from the SparseArray PR
1. We need to unbox the EA values from Series before passing to `EA._concat_same_type`
2. We need to followup with a `take` to get the correct order.
| https://api.github.com/repos/pandas-dev/pandas/pulls/23285 | 2018-10-22T20:38:21Z | 2018-11-08T13:17:19Z | 2018-11-08T13:17:19Z | 2018-11-08T13:17:20Z |
ENH: Support EAs in Series.unstack | diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py
index bda486dba3b0f..67fdfb82e72c0 100644
--- a/asv_bench/benchmarks/reshape.py
+++ b/asv_bench/benchmarks/reshape.py
@@ -49,21 +49,33 @@ def time_unstack(self):
class Unstack(object):
- def setup(self):
+ params = ['int', 'category']
+
+ def setup(self, dtype):
m = 100
n = 1000
levels = np.arange(m)
index = MultiIndex.from_product([levels] * 2)
columns = np.arange(n)
- values = np.arange(m * m * n).reshape(m * m, n)
+ if dtype == 'int':
+ values = np.arange(m * m * n).reshape(m * m, n)
+ else:
+ # the category branch is ~20x slower than int. So we
+ # cut down the size a bit. Now it's only ~3x slower.
+ n = 50
+ columns = columns[:n]
+ indices = np.random.randint(0, 52, size=(m * m, n))
+ values = np.take(list(string.ascii_letters), indices)
+ values = [pd.Categorical(v) for v in values.T]
+
self.df = DataFrame(values, index, columns)
self.df2 = self.df.iloc[:-1]
- def time_full_product(self):
+ def time_full_product(self, dtype):
self.df.unstack()
- def time_without_last_row(self):
+ def time_without_last_row(self, dtype):
self.df2.unstack()
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 33d45f8d4444d..f6b619defc435 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -853,7 +853,7 @@ update the ``ExtensionDtype._metadata`` tuple to match the signature of your
- Updated the ``.type`` attribute for ``PeriodDtype``, ``DatetimeTZDtype``, and ``IntervalDtype`` to be instances of the dtype (``Period``, ``Timestamp``, and ``Interval`` respectively) (:issue:`22938`)
- :func:`ExtensionArray.isna` is allowed to return an ``ExtensionArray`` (:issue:`22325`).
- Support for reduction operations such as ``sum``, ``mean`` via opt-in base class method override (:issue:`22762`)
-- :meth:`Series.unstack` no longer converts extension arrays to object-dtype ndarrays. The output ``DataFrame`` will now have the same dtype as the input. This changes behavior for Categorical and Sparse data (:issue:`23077`).
+- :meth:`Series.unstack` and :meth:`DataFrame.unstack` no longer convert extension arrays to object-dtype ndarrays. Each column in the output ``DataFrame`` will now have the same dtype as the input (:issue:`23077`).
- Bug when grouping :meth:`Dataframe.groupby()` and aggregating on ``ExtensionArray`` it was not returning the actual ``ExtensionArray`` dtype (:issue:`23227`).
.. _whatsnew_0240.api.incompatibilities:
@@ -1090,6 +1090,7 @@ Categorical
- Bug when indexing with a boolean-valued ``Categorical``. Now a boolean-valued ``Categorical`` is treated as a boolean mask (:issue:`22665`)
- Constructing a :class:`CategoricalIndex` with empty values and boolean categories was raising a ``ValueError`` after a change to dtype coercion (:issue:`22702`).
- Bug in :meth:`Categorical.take` with a user-provided ``fill_value`` not encoding the ``fill_value``, which could result in a ``ValueError``, incorrect results, or a segmentation fault (:issue:`23296`).
+- In meth:`Series.unstack`, specifying a ``fill_value`` not present in the categories now raises a ``TypeError`` rather than ignoring the ``fill_value`` (:issue:`23284`)
- Bug when resampling :meth:`Dataframe.resample()` and aggregating on categorical data, the categorical dtype was getting lost. (:issue:`23227`)
Datetimelike
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index e84953f3dab56..7a55b652054ed 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
+import functools
import warnings
import inspect
import re
@@ -34,6 +35,7 @@
is_numeric_v_string_like, is_extension_type,
is_extension_array_dtype,
is_list_like,
+ is_sparse,
is_re,
is_re_compilable,
pandas_dtype)
@@ -632,7 +634,10 @@ def _astype(self, dtype, copy=False, errors='raise', values=None,
return self
if klass is None:
- if dtype == np.object_:
+ if is_sparse(self.values):
+ # special case sparse, Series[Sparse].astype(object) is sparse
+ klass = ExtensionBlock
+ elif is_object_dtype(dtype):
klass = ObjectBlock
elif is_extension_array_dtype(dtype):
klass = ExtensionBlock
@@ -1429,7 +1434,7 @@ def equals(self, other):
return False
return array_equivalent(self.values, other.values)
- def _unstack(self, unstacker_func, new_columns):
+ def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):
"""Return a list of unstacked blocks of self
Parameters
@@ -1438,6 +1443,10 @@ def _unstack(self, unstacker_func, new_columns):
Partially applied unstacker.
new_columns : Index
All columns of the unstacked BlockManager.
+ n_rows : int
+ Only used in ExtensionBlock.unstack
+ fill_value : int
+ Only used in ExtensionBlock.unstack
Returns
-------
@@ -1731,7 +1740,7 @@ def _slice(self, slicer):
def _try_cast_result(self, result, dtype=None):
return result
- def _unstack(self, unstacker_func, new_columns):
+ def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):
"""Return a list of unstacked blocks of self
Parameters
@@ -1740,6 +1749,10 @@ def _unstack(self, unstacker_func, new_columns):
Partially applied unstacker.
new_columns : Index
All columns of the unstacked BlockManager.
+ n_rows : int
+ Only used in ExtensionBlock.unstack
+ fill_value : int
+ Only used in ExtensionBlock.unstack
Returns
-------
@@ -1751,11 +1764,11 @@ def _unstack(self, unstacker_func, new_columns):
# NonConsolidatable blocks can have a single item only, so we return
# one block per item
unstacker = unstacker_func(self.values.T)
- new_items = unstacker.get_new_columns()
- new_placement = new_columns.get_indexer(new_items)
- new_values, mask = unstacker.get_new_values()
- mask = mask.any(0)
+ new_placement, new_values, mask = self._get_unstack_items(
+ unstacker, new_columns
+ )
+
new_values = new_values.T[mask]
new_placement = new_placement[mask]
@@ -1763,6 +1776,38 @@ def _unstack(self, unstacker_func, new_columns):
for vals, place in zip(new_values, new_placement)]
return blocks, mask
+ def _get_unstack_items(self, unstacker, new_columns):
+ """
+ Get the placement, values, and mask for a Block unstack.
+
+ This is shared between ObjectBlock and ExtensionBlock. They
+ differ in that ObjectBlock passes the values, while ExtensionBlock
+ passes the dummy ndarray of positions to be used by a take
+ later.
+
+ Parameters
+ ----------
+ unstacker : pandas.core.reshape.reshape._Unstacker
+ new_columns : Index
+ All columns of the unstacked BlockManager.
+
+ Returns
+ -------
+ new_placement : ndarray[int]
+ The placement of the new columns in `new_columns`.
+ new_values : Union[ndarray, ExtensionArray]
+ The first return value from _Unstacker.get_new_values.
+ mask : ndarray[bool]
+ The second return value from _Unstacker.get_new_values.
+ """
+ # shared with ExtensionBlock
+ new_items = unstacker.get_new_columns()
+ new_placement = new_columns.get_indexer(new_items)
+ new_values, mask = unstacker.get_new_values()
+
+ mask = mask.any(0)
+ return new_placement, new_values, mask
+
class ExtensionBlock(NonConsolidatableMixIn, Block):
"""Block for holding extension types.
@@ -1950,6 +1995,30 @@ def shift(self, periods, axis=0):
def _ftype(self):
return getattr(self.values, '_pandas_ftype', Block._ftype)
+ def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):
+ # ExtensionArray-safe unstack.
+ # We override ObjectBlock._unstack, which unstacks directly on the
+ # values of the array. For EA-backed blocks, this would require
+ # converting to a 2-D ndarray of objects.
+ # Instead, we unstack an ndarray of integer positions, followed by
+ # a `take` on the actual values.
+ dummy_arr = np.arange(n_rows)
+ dummy_unstacker = functools.partial(unstacker_func, fill_value=-1)
+ unstacker = dummy_unstacker(dummy_arr)
+
+ new_placement, new_values, mask = self._get_unstack_items(
+ unstacker, new_columns
+ )
+
+ blocks = [
+ self.make_block_same_class(
+ self.values.take(indices, allow_fill=True,
+ fill_value=fill_value),
+ [place])
+ for indices, place in zip(new_values.T, new_placement)
+ ]
+ return blocks, mask
+
class NumericBlock(Block):
__slots__ = ()
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index fc3a12a9da82a..0519c5e5abe33 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1405,18 +1405,21 @@ def canonicalize(block):
return all(block.equals(oblock)
for block, oblock in zip(self_blocks, other_blocks))
- def unstack(self, unstacker_func):
+ def unstack(self, unstacker_func, fill_value):
"""Return a blockmanager with all blocks unstacked.
Parameters
----------
unstacker_func : callable
A (partially-applied) ``pd.core.reshape._Unstacker`` class.
+ fill_value : Any
+ fill_value for newly introduced missing values.
Returns
-------
unstacked : BlockManager
"""
+ n_rows = self.shape[-1]
dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items)
new_columns = dummy.get_new_columns()
new_index = dummy.get_new_index()
@@ -1427,7 +1430,10 @@ def unstack(self, unstacker_func):
blocks, mask = blk._unstack(
partial(unstacker_func,
value_columns=self.items[blk.mgr_locs.indexer]),
- new_columns)
+ new_columns,
+ n_rows,
+ fill_value
+ )
new_blocks.extend(blocks)
columns_mask.extend(mask)
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index d3b677a1df2a3..2dca7cf0e6aa3 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -12,12 +12,12 @@
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
ensure_platform_int, is_bool_dtype, is_extension_array_dtype, is_list_like,
- is_object_dtype, is_sparse, needs_i8_conversion)
+ is_object_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import notna
from pandas import compat
import pandas.core.algorithms as algos
-from pandas.core.arrays import Categorical, SparseArray
+from pandas.core.arrays import SparseArray
from pandas.core.arrays.categorical import _factorize_from_iterable
from pandas.core.frame import DataFrame
from pandas.core.index import Index, MultiIndex
@@ -82,28 +82,15 @@ class _Unstacker(object):
def __init__(self, values, index, level=-1, value_columns=None,
fill_value=None, constructor=None):
- self.is_categorical = None
- self.is_sparse = is_sparse(values)
if values.ndim == 1:
- if isinstance(values, Categorical):
- self.is_categorical = values
- values = np.array(values)
- elif self.is_sparse:
- # XXX: Makes SparseArray *dense*, but it's supposedly
- # a single column at a time, so it's "doable"
- values = values.values
values = values[:, np.newaxis]
self.values = values
self.value_columns = value_columns
self.fill_value = fill_value
if constructor is None:
- if self.is_sparse:
- self.constructor = SparseDataFrame
- else:
- self.constructor = DataFrame
- else:
- self.constructor = constructor
+ constructor = DataFrame
+ self.constructor = constructor
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError('must pass column labels for multi-column data')
@@ -174,14 +161,6 @@ def get_result(self):
columns = self.get_new_columns()
index = self.get_new_index()
- # may need to coerce categoricals here
- if self.is_categorical is not None:
- categories = self.is_categorical.categories
- ordered = self.is_categorical.ordered
- values = [Categorical(values[:, i], categories=categories,
- ordered=ordered)
- for i in range(values.shape[-1])]
-
return self.constructor(values, index=index, columns=columns)
def get_new_values(self):
@@ -339,6 +318,7 @@ def _unstack_multiple(data, clocs, fill_value=None):
if isinstance(data, Series):
dummy = data.copy()
dummy.index = dummy_index
+
unstacked = dummy.unstack('__placeholder__', fill_value=fill_value)
new_levels = clevels
new_names = cnames
@@ -394,6 +374,8 @@ def unstack(obj, level, fill_value=None):
else:
return obj.T.stack(dropna=False)
else:
+ if is_extension_array_dtype(obj.dtype):
+ return _unstack_extension_series(obj, level, fill_value)
unstacker = _Unstacker(obj.values, obj.index, level=level,
fill_value=fill_value,
constructor=obj._constructor_expanddim)
@@ -404,7 +386,8 @@ def _unstack_frame(obj, level, fill_value=None):
if obj._is_mixed_type:
unstacker = partial(_Unstacker, index=obj.index,
level=level, fill_value=fill_value)
- blocks = obj._data.unstack(unstacker)
+ blocks = obj._data.unstack(unstacker,
+ fill_value=fill_value)
return obj._constructor(blocks)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
@@ -414,6 +397,52 @@ def _unstack_frame(obj, level, fill_value=None):
return unstacker.get_result()
+def _unstack_extension_series(series, level, fill_value):
+ """
+ Unstack an ExtensionArray-backed Series.
+
+ The ExtensionDtype is preserved.
+
+ Parameters
+ ----------
+ series : Series
+ A Series with an ExtensionArray for values
+ level : Any
+ The level name or number.
+ fill_value : Any
+ The user-level (not physical storage) fill value to use for
+ missing values introduced by the reshape. Passed to
+ ``series.values.take``.
+
+ Returns
+ -------
+ DataFrame
+ Each column of the DataFrame will have the same dtype as
+ the input Series.
+ """
+ # Implementation note: the basic idea is to
+ # 1. Do a regular unstack on a dummy array of integers
+ # 2. Followup with a columnwise take.
+ # We use the dummy take to discover newly-created missing values
+ # introduced by the reshape.
+ from pandas.core.reshape.concat import concat
+
+ dummy_arr = np.arange(len(series))
+ # fill_value=-1, since we will do a series.values.take later
+ result = _Unstacker(dummy_arr, series.index,
+ level=level, fill_value=-1).get_result()
+
+ out = []
+ values = series.values
+
+ for col, indices in result.iteritems():
+ out.append(Series(values.take(indices.values,
+ allow_fill=True,
+ fill_value=fill_value),
+ name=col, index=result.index))
+ return concat(out, axis='columns', copy=False, keys=result.columns)
+
+
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py
index 446912b66bf33..d0e42e69e300f 100644
--- a/pandas/tests/extension/base/reshaping.py
+++ b/pandas/tests/extension/base/reshaping.py
@@ -1,3 +1,5 @@
+import itertools
+
import numpy as np
import pytest
@@ -170,3 +172,46 @@ def test_merge(self, data, na_value):
[data[0], data[0], data[1], data[2], na_value],
dtype=data.dtype)})
self.assert_frame_equal(res, exp[['ext', 'int1', 'key', 'int2']])
+
+ @pytest.mark.parametrize("index", [
+ # Two levels, uniform.
+ pd.MultiIndex.from_product(([['A', 'B'], ['a', 'b']]),
+ names=['a', 'b']),
+
+ # non-uniform
+ pd.MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('B', 'b')]),
+
+ # three levels, non-uniform
+ pd.MultiIndex.from_product([('A', 'B'), ('a', 'b', 'c'), (0, 1, 2)]),
+ pd.MultiIndex.from_tuples([
+ ('A', 'a', 1),
+ ('A', 'b', 0),
+ ('A', 'a', 0),
+ ('B', 'a', 0),
+ ('B', 'c', 1),
+ ]),
+ ])
+ @pytest.mark.parametrize("obj", ["series", "frame"])
+ def test_unstack(self, data, index, obj):
+ data = data[:len(index)]
+ if obj == "series":
+ ser = pd.Series(data, index=index)
+ else:
+ ser = pd.DataFrame({"A": data, "B": data}, index=index)
+
+ n = index.nlevels
+ levels = list(range(n))
+ # [0, 1, 2]
+ # [(0,), (1,), (2,), (0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
+ combinations = itertools.chain.from_iterable(
+ itertools.permutations(levels, i) for i in range(1, n)
+ )
+
+ for level in combinations:
+ result = ser.unstack(level=level)
+ assert all(isinstance(result[col].values, type(data))
+ for col in result.columns)
+ expected = ser.astype(object).unstack(level=level)
+ result = result.astype(object)
+
+ self.assert_frame_equal(result, expected)
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py
index c14bfa359bc64..3c8905c578c4f 100644
--- a/pandas/tests/extension/decimal/array.py
+++ b/pandas/tests/extension/decimal/array.py
@@ -102,7 +102,7 @@ def copy(self, deep=False):
def astype(self, dtype, copy=True):
if isinstance(dtype, type(self.dtype)):
return type(self)(self._data, context=dtype.context)
- return super(DecimalArray, self).astype(dtype, copy)
+ return np.asarray(self, dtype=dtype)
def __setitem__(self, key, value):
if pd.api.types.is_list_like(value):
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index 1c9beefe9e542..af5f6bf0a2f65 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -1,4 +1,5 @@
import decimal
+import math
import operator
import numpy as np
@@ -63,9 +64,23 @@ def data_for_grouping():
class BaseDecimal(object):
def assert_series_equal(self, left, right, *args, **kwargs):
-
- left_na = left.isna()
- right_na = right.isna()
+ def convert(x):
+ # need to convert array([Decimal(NaN)], dtype='object') to np.NaN
+ # because Series[object].isnan doesn't recognize decimal(NaN) as
+ # NA.
+ try:
+ return math.isnan(x)
+ except TypeError:
+ return False
+
+ if left.dtype == 'object':
+ left_na = left.apply(convert)
+ else:
+ left_na = left.isna()
+ if right.dtype == 'object':
+ right_na = right.apply(convert)
+ else:
+ right_na = right.isna()
tm.assert_series_equal(left_na, right_na)
return tm.assert_series_equal(left[~left_na],
diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py
index 778432376e092..2b1bfecdf8f28 100644
--- a/pandas/tests/extension/json/test_json.py
+++ b/pandas/tests/extension/json/test_json.py
@@ -139,7 +139,11 @@ def test_from_dtype(self, data):
class TestReshaping(BaseJSON, base.BaseReshapingTests):
- pass
+ @pytest.mark.xfail(reason="dict for NA", strict=True)
+ def test_unstack(self, data, index):
+ # The base test has NaN for the expected NA value.
+ # this matches otherwise
+ return super().test_unstack(data, index)
class TestGetitem(BaseJSON, base.BaseGetitemTests):
diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py
index ed3cc39052183..54511df4effad 100644
--- a/pandas/tests/frame/test_reshape.py
+++ b/pandas/tests/frame/test_reshape.py
@@ -277,8 +277,6 @@ def test_unstack_fill_frame_timedelta(self):
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
- @pytest.mark.xfail(reason="GH-23077",
- strict=True)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
@@ -305,7 +303,8 @@ def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
data.index = pd.MultiIndex.from_tuples(
- [('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
+ [('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')],
+ )
# By default missing values will be NaN
result = data.unstack()
@@ -316,9 +315,10 @@ def test_unstack_fill_frame_categorical(self):
index=list('xyz'))
assert_frame_equal(result, expected)
- # Fill with non-category results in NaN entries similar to above
- result = data.unstack(fill_value='d')
- assert_frame_equal(result, expected)
+ # Fill with non-category results in a TypeError
+ msg = r"'fill_value' \('d'\) is not in"
+ with tm.assert_raises_regex(TypeError, msg):
+ data.unstack(fill_value='d')
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value='c')
@@ -874,6 +874,21 @@ def test_stack_preserve_categorical_dtype(self, ordered, labels):
tm.assert_series_equal(result, expected)
+ @pytest.mark.parametrize('level', [0, 1])
+ def test_unstack_mixed_extension_types(self, level):
+ index = pd.MultiIndex.from_tuples([('A', 0), ('A', 1), ('B', 1)],
+ names=['a', 'b'])
+ df = pd.DataFrame({"A": pd.core.arrays.integer_array([0, 1, None]),
+ "B": pd.Categorical(['a', 'a', 'b'])}, index=index)
+
+ result = df.unstack(level=level)
+ expected = df.astype(object).unstack(level=level)
+
+ expected_dtypes = pd.Series([df.A.dtype] * 2 + [df.B.dtype] * 2,
+ index=result.columns)
+ tm.assert_series_equal(result.dtypes, expected_dtypes)
+ tm.assert_frame_equal(result.astype(object), expected)
+
@pytest.mark.parametrize("level", [0, 'baz'])
def test_unstack_swaplevel_sortlevel(self, level):
# GH 20994
diff --git a/pandas/tests/sparse/test_pivot.py b/pandas/tests/sparse/test_pivot.py
index e7eba63e4e0b3..0e71048f51177 100644
--- a/pandas/tests/sparse/test_pivot.py
+++ b/pandas/tests/sparse/test_pivot.py
@@ -47,4 +47,5 @@ def test_pivot_table_multi(self):
values=['D', 'E'])
res_dense = pd.pivot_table(self.dense, index='A', columns='B',
values=['D', 'E'])
+ res_dense = res_dense.apply(lambda x: x.astype("Sparse[float64]"))
tm.assert_frame_equal(res_sparse, res_dense)
| Closes https://github.com/pandas-dev/pandas/issues/23077
This prevents ExtensionArray-backed series from being converted to object-dtype in unstack.
The strategy is to do a dummy unstack on an ndarray of integers, which provides the `indices` to `take` later on. We then concat together at the end. This provided decent performance, and seems pretty maintainable in the long run.
I'll post some benchmarks later.
Do we want to do DataFrame.stack() in the same PR? | https://api.github.com/repos/pandas-dev/pandas/pulls/23284 | 2018-10-22T20:09:06Z | 2018-11-07T15:34:58Z | 2018-11-07T15:34:58Z | 2018-11-07T15:35:04Z |
TST: xfail gbq & feather tests for now as version incompat | diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index 73e29e6eb9a6a..88a2fded3500c 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -135,7 +135,9 @@ def test_iterator(self):
(pd.read_csv, 'os', FileNotFoundError, 'csv'),
(pd.read_fwf, 'os', FileNotFoundError, 'txt'),
(pd.read_excel, 'xlrd', FileNotFoundError, 'xlsx'),
- (pd.read_feather, 'feather', Exception, 'feather'),
+ pytest.param(
+ pd.read_feather, 'feather', Exception, 'feather',
+ marks=pytest.mark.xfail(reason="failing for pyarrow < 0.11.0")),
(pd.read_hdf, 'tables', FileNotFoundError, 'h5'),
(pd.read_stata, 'os', FileNotFoundError, 'dta'),
(pd.read_sas, 'os', FileNotFoundError, 'sas7bdat'),
@@ -160,7 +162,10 @@ def test_read_non_existant_read_table(self):
(pd.read_csv, 'os', ('io', 'data', 'iris.csv')),
(pd.read_fwf, 'os', ('io', 'data', 'fixed_width_format.txt')),
(pd.read_excel, 'xlrd', ('io', 'data', 'test1.xlsx')),
- (pd.read_feather, 'feather', ('io', 'data', 'feather-0_3_1.feather')),
+ pytest.param(
+ pd.read_feather, 'feather',
+ ('io', 'data', 'feather-0_3_1.feather'),
+ marks=pytest.mark.xfail(reason="failing for pyarrow < 0.11.0")),
(pd.read_hdf, 'tables', ('io', 'data', 'legacy_hdf',
'datetimetz_object.h5')),
(pd.read_stata, 'os', ('io', 'data', 'stata10_115.dta')),
diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py
index 9d04111d64125..36118fb1303fc 100644
--- a/pandas/tests/io/test_feather.py
+++ b/pandas/tests/io/test_feather.py
@@ -17,6 +17,7 @@
fv = LooseVersion(feather.__version__)
+@pytest.mark.xfail(reason="failing for pyarrow < 0.11.0")
@pytest.mark.single
class TestFeather(object):
diff --git a/pandas/tests/io/test_gbq.py b/pandas/tests/io/test_gbq.py
index 68413d610e615..bc604e066a3e8 100644
--- a/pandas/tests/io/test_gbq.py
+++ b/pandas/tests/io/test_gbq.py
@@ -109,6 +109,7 @@ def test_read_gbq_without_dialect_warns_future_change(monkeypatch):
pd.read_gbq("SELECT 1")
+@pytest.mark.xfail(reason="failing for pandas-gbq >= 0.7.0")
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index ab7f04ad86ffc..c92d9a489b5c3 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -437,6 +437,7 @@ def test_duplicate_columns(self, pa):
columns=list('aaa')).copy()
self.check_error_on_write(df, pa, ValueError)
+ @pytest.mark.xfail(reason="failing for pyarrow < 0.11.0")
def test_unsupported(self, pa):
# period
df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)})
| need to revert these when fixed
https://travis-ci.org/pandas-dev/pandas/builds/444265455
| https://api.github.com/repos/pandas-dev/pandas/pulls/23281 | 2018-10-22T18:06:24Z | 2018-10-23T02:54:45Z | 2018-10-23T02:54:45Z | 2018-10-23T18:11:16Z |
DOC: Added a Multi Index example for the Series.sum method | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c24872d7c89e9..e273fd020a011 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -10185,6 +10185,40 @@ def _doc_parms(cls):
_sum_examples = """\
Examples
--------
+``MultiIndex`` series example of monthly rainfall
+
+>>> index = pd.MultiIndex.from_product(
+... [['London', 'New York'], ['Jun', 'Jul', 'Aug']],
+... names=['city', 'month'])
+>>> s = pd.Series([47, 35, 54, 112, 117, 113], index=index)
+>>> s
+city month
+London Jun 47
+ Jul 35
+ Aug 54
+New York Jun 112
+ Jul 117
+ Aug 113
+dtype: int64
+
+>>> s.sum()
+478
+
+Sum using level names, as well as indices
+
+>>> s.sum(level='city')
+city
+London 136
+New York 342
+dtype: int64
+
+>>> s.sum(level=1)
+month
+Jun 159
+Jul 152
+Aug 167
+dtype: int64
+
By default, the sum of an empty or all-NA Series is ``0``.
>>> pd.Series([]).sum() # min_count=0 is the default
| Added a simple example of using the sum method on a multi-indexed series.
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` | https://api.github.com/repos/pandas-dev/pandas/pulls/23279 | 2018-10-22T17:38:40Z | 2018-10-25T23:37:37Z | 2018-10-25T23:37:37Z | 2018-10-26T09:47:25Z |
CLN: collected cleanups, warning suppression in tests | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 5056b5b82755c..2aa8395053b5c 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -5400,9 +5400,7 @@ def _cmp_method(self, other, op):
with np.errstate(all="ignore"):
result = ops.comparison_op(self._values, np.asarray(other), op)
- if is_bool_dtype(result):
- return result
- return ops.invalid_comparison(self, other, op)
+ return result
@classmethod
def _add_numeric_methods_binary(cls):
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index be105f0035447..af02f426ca61a 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -501,9 +501,7 @@ def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]
# no need to downcast our float
# unless indicated
- if downcast is None and (
- self.is_float or self.is_timedelta or self.is_datetime
- ):
+ if downcast is None and (self.is_float or self.is_datelike):
return blocks
return extend_blocks([b.downcast(downcast) for b in blocks])
@@ -638,7 +636,7 @@ def astype(self, dtype, copy: bool = False, errors: str = "raise"):
if isinstance(values, np.ndarray):
values = values.reshape(self.shape)
- newb = make_block(values, placement=self.mgr_locs, ndim=self.ndim)
+ newb = self.make_block(values)
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
@@ -2481,7 +2479,7 @@ def f(mask, val, idx):
blocks = self.split_and_operate(None, f, False)
else:
values = f(None, self.values.ravel(), None)
- blocks = [make_block(values, ndim=self.ndim, placement=self.mgr_locs)]
+ blocks = [self.make_block(values)]
return blocks
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 7ad058cfeb83c..8d54f88558066 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -187,7 +187,7 @@ def __repr__(self) -> str:
return f"{type(self).__name__}({repr(self.block)}, {self.indexers})"
@cache_readonly
- def needs_filling(self):
+ def needs_filling(self) -> bool:
for indexer in self.indexers.values():
# FIXME: cache results of indexer == -1 checks.
if (indexer == -1).any():
@@ -206,7 +206,7 @@ def dtype(self):
return get_dtype(maybe_promote(self.block.dtype, self.block.fill_value)[0])
@cache_readonly
- def is_na(self):
+ def is_na(self) -> bool:
if self.block is None:
return True
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 3e3330fa4378f..2903ede1d5c0b 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -4728,8 +4728,13 @@ def _set_tz(
assert values.tz is None or values.tz == tz
if tz is not None:
- name = getattr(values, "name", None)
- values = values.ravel()
+ if isinstance(values, DatetimeIndex):
+ name = values.name
+ values = values.asi8
+ else:
+ name = None
+ values = values.ravel()
+
tz = _ensure_decoded(tz)
values = DatetimeIndex(values, name=name)
values = values.tz_localize("UTC").tz_convert(tz)
diff --git a/pandas/tests/extension/test_external_block.py b/pandas/tests/extension/test_external_block.py
index 1843126898f3d..e98545daaf049 100644
--- a/pandas/tests/extension/test_external_block.py
+++ b/pandas/tests/extension/test_external_block.py
@@ -11,15 +11,6 @@ class CustomBlock(ExtensionBlock):
_holder = np.ndarray
_can_hold_na = False
- def concat_same_type(self, to_concat, placement=None):
- """
- Always concatenate disregarding self.ndim as the values are
- always 1D in this custom Block
- """
- values = np.concatenate([blk.values for blk in to_concat])
- placement = self.mgr_locs if self.ndim == 2 else slice(len(values))
- return self.make_block_same_class(values, placement=placement)
-
@pytest.fixture
def df():
diff --git a/pandas/tests/frame/apply/test_frame_transform.py b/pandas/tests/frame/apply/test_frame_transform.py
index 01c6fd4ec08f0..1b259ddbd41dc 100644
--- a/pandas/tests/frame/apply/test_frame_transform.py
+++ b/pandas/tests/frame/apply/test_frame_transform.py
@@ -168,14 +168,18 @@ def test_transform_bad_dtype(op):
if op in ("backfill", "shift", "pad", "bfill", "ffill"):
pytest.xfail("Transform function works on any datatype")
msg = "Transform function failed"
- with pytest.raises(ValueError, match=msg):
- df.transform(op)
- with pytest.raises(ValueError, match=msg):
- df.transform([op])
- with pytest.raises(ValueError, match=msg):
- df.transform({"A": op})
- with pytest.raises(ValueError, match=msg):
- df.transform({"A": [op]})
+
+ # tshift is deprecated
+ warn = None if op != "tshift" else FutureWarning
+ with tm.assert_produces_warning(warn, check_stacklevel=False):
+ with pytest.raises(ValueError, match=msg):
+ df.transform(op)
+ with pytest.raises(ValueError, match=msg):
+ df.transform([op])
+ with pytest.raises(ValueError, match=msg):
+ df.transform({"A": op})
+ with pytest.raises(ValueError, match=msg):
+ df.transform({"A": [op]})
@pytest.mark.parametrize("op", transformation_kernels)
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 8b5d0c7ade56c..f5d1808f367e7 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -605,7 +605,9 @@ def test_constructor_expanddim_lookup(self):
# raise NotImplementedError
df = DataFrame()
- inspect.getmembers(df)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ # _AXIS_NUMBERS, _AXIS_NAMES lookups
+ inspect.getmembers(df)
with pytest.raises(NotImplementedError, match="Not supported for DataFrames!"):
df._constructor_expanddim(np.arange(27).reshape(3, 3, 3))
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 73d2e99d3ff5e..bc178c138341f 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -613,8 +613,6 @@ def test_equals(self, index):
def test_equals_op(self):
# GH9947, GH10637
index_a = self.create_index()
- if isinstance(index_a, PeriodIndex):
- pytest.skip("Skip check for PeriodIndex")
n = len(index_a)
index_b = index_a[0:-1]
diff --git a/pandas/tests/io/__init__.py b/pandas/tests/io/__init__.py
index e69de29bb2d1d..c5e867f45b92d 100644
--- a/pandas/tests/io/__init__.py
+++ b/pandas/tests/io/__init__.py
@@ -0,0 +1,17 @@
+import pytest
+
+pytestmark = [
+ # fastparquet
+ pytest.mark.filterwarnings(
+ "ignore:PY_SSIZE_T_CLEAN will be required.*:DeprecationWarning"
+ ),
+ # xlrd
+ pytest.mark.filterwarnings(
+ "ignore:This method will be removed in future versions:DeprecationWarning"
+ ),
+ pytest.mark.filterwarnings(
+ "ignore:This method will be removed in future versions. "
+ r"Use 'tree.iter\(\)' or 'list\(tree.iter\(\)\)' instead."
+ ":PendingDeprecationWarning"
+ ),
+]
diff --git a/pandas/tests/io/excel/__init__.py b/pandas/tests/io/excel/__init__.py
index 550172329fc57..419761cbe1d6d 100644
--- a/pandas/tests/io/excel/__init__.py
+++ b/pandas/tests/io/excel/__init__.py
@@ -1,6 +1,12 @@
import pytest
-pytestmark = pytest.mark.filterwarnings(
- # Looks like tree.getiterator is deprecated in favor of tree.iter
- "ignore:This method will be removed in future versions:PendingDeprecationWarning"
-)
+pytestmark = [
+ pytest.mark.filterwarnings(
+ # Looks like tree.getiterator is deprecated in favor of tree.iter
+ "ignore:This method will be removed in future versions:"
+ "PendingDeprecationWarning"
+ ),
+ pytest.mark.filterwarnings(
+ "ignore:This method will be removed in future versions:DeprecationWarning"
+ ),
+]
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index 4bdcc5b327fa7..800b4c79b9c09 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -635,8 +635,6 @@ def test_read_from_s3_url(self, read_ext, s3_resource, s3so):
tm.assert_frame_equal(url_table, local_table)
@pytest.mark.slow
- # ignore warning from old xlrd
- @pytest.mark.filterwarnings("ignore:This metho:PendingDeprecationWarning")
def test_read_from_file_url(self, read_ext, datapath):
# FILE
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index ede8d61490778..2a6f3d1ad9380 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -245,11 +245,6 @@ def test_read_expands_user_home_dir(
),
],
)
- @pytest.mark.filterwarnings(
- "ignore:This method will be removed in future versions. "
- r"Use 'tree.iter\(\)' or 'list\(tree.iter\(\)\)' instead."
- ":PendingDeprecationWarning"
- )
def test_read_fspath_all(self, reader, module, path, datapath):
pytest.importorskip(module)
path = datapath(*path)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37021 | 2020-10-10T03:57:18Z | 2020-10-10T16:51:17Z | 2020-10-10T16:51:17Z | 2020-10-10T17:01:09Z |
CLN/REF: de-duplicate DatetimeTZBlock.setitem | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index be105f0035447..54ac1a3fd52c2 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1758,6 +1758,14 @@ def setitem(self, indexer, value):
`indexer` is a direct slice/positional indexer. `value` must
be a compatible shape.
"""
+ if not self._can_hold_element(value):
+ # This is only relevant for DatetimeTZBlock, which has a
+ # non-trivial `_can_hold_element`.
+ # https://github.com/pandas-dev/pandas/issues/24020
+ # Need a dedicated setitem until GH#24020 (type promotion in setitem
+ # for extension arrays) is designed and implemented.
+ return self.astype(object).setitem(indexer, value)
+
if isinstance(indexer, tuple):
# TODO(EA2D): not needed with 2D EAs
# we are always 1-D
@@ -2175,7 +2183,13 @@ def astype(self, dtype, copy: bool = False, errors: str = "raise"):
def _can_hold_element(self, element: Any) -> bool:
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
- if self.is_datetimetz:
+ if isinstance(element, list) and len(element) == 0:
+ # Following DatetimeArray._validate_setitem_value
+ # convention, we treat this as object-dtype
+ # (even though tipo is float64)
+ return True
+
+ elif self.is_datetimetz:
# require exact match, since non-nano does not exist
return is_dtype_equal(tipo, self.dtype) or is_valid_nat_for_dtype(
element, self.dtype
@@ -2339,21 +2353,6 @@ def fillna(self, value, limit=None, inplace=False, downcast=None):
value, limit=limit, inplace=inplace, downcast=downcast
)
- def setitem(self, indexer, value):
- # https://github.com/pandas-dev/pandas/issues/24020
- # Need a dedicated setitem until #24020 (type promotion in setitem
- # for extension arrays) is designed and implemented.
- if self._can_hold_element(value) or (
- isinstance(indexer, np.ndarray) and indexer.size == 0
- ):
- return super().setitem(indexer, value)
-
- obj_vals = self.values.astype(object)
- newb = make_block(
- obj_vals, placement=self.mgr_locs, klass=ObjectBlock, ndim=self.ndim
- )
- return newb.setitem(indexer, value)
-
def quantile(self, qs, interpolation="linear", axis=0):
naive = self.values.view("M8[ns]")
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Move towards avoiding special treatment for pandas-internal EAs.
xref #24020 | https://api.github.com/repos/pandas-dev/pandas/pulls/37019 | 2020-10-10T02:41:27Z | 2020-10-10T16:27:02Z | 2020-10-10T16:27:02Z | 2020-10-10T16:29:24Z |
CLN: require td64 in TimeDeltaBlock | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index be105f0035447..0a4aa6d0a203a 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -2377,7 +2377,11 @@ class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock):
def _maybe_coerce_values(self, values):
if values.dtype != TD64NS_DTYPE:
- # e.g. non-nano or int64
+ # non-nano we will convert to nano
+ if values.dtype.kind != "m":
+ # caller is responsible for ensuring timedelta64 dtype
+ raise TypeError(values.dtype) # pragma: no cover
+
values = TimedeltaArray._from_sequence(values)._data
if isinstance(values, TimedeltaArray):
values = values._data
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index f2ec04c1fc05d..52536583b9b0d 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -573,9 +573,9 @@ def interpolate_2d(
if ndim == 1:
result = result[0]
- if orig_values.dtype.kind == "M":
- # convert float back to datetime64
- result = result.astype(orig_values.dtype)
+ if orig_values.dtype.kind in ["m", "M"]:
+ # convert float back to datetime64/timedelta64
+ result = result.view(orig_values.dtype)
return result
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
only one place where the check currently fails, fixed in core.missing | https://api.github.com/repos/pandas-dev/pandas/pulls/37018 | 2020-10-10T02:36:55Z | 2020-10-10T16:33:48Z | 2020-10-10T16:33:48Z | 2020-10-10T16:35:10Z |
REF/TYP: define NDFrame numeric methods non-dynamically | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ebe5185ce4488..ac6b2e87a4f3c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -8625,7 +8625,7 @@ def _reduce(
"will include datetime64 and datetime64tz columns in a "
"future version.",
FutureWarning,
- stacklevel=3,
+ stacklevel=5,
)
cols = self.columns[~dtype_is_dt]
self = self[cols]
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 338b45b5503dc..765974bf8043e 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -51,7 +51,6 @@
TimestampConvertibleTypes,
ValueKeyFunc,
)
-from pandas.compat import set_function_name
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError, InvalidIndexError
@@ -10399,6 +10398,287 @@ def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
return grouped.aggregate(applyf)
+ def _logical_func(
+ self, name: str, func, axis=0, bool_only=None, skipna=True, level=None, **kwargs
+ ):
+ nv.validate_logical_func(tuple(), kwargs, fname=name)
+ if level is not None:
+ if bool_only is not None:
+ raise NotImplementedError(
+ "Option bool_only is not implemented with option level."
+ )
+ return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
+
+ if self.ndim > 1 and axis is None:
+ # Reduce along one dimension then the other, to simplify DataFrame._reduce
+ res = self._logical_func(
+ name, func, axis=0, bool_only=bool_only, skipna=skipna, **kwargs
+ )
+ return res._logical_func(name, func, skipna=skipna, **kwargs)
+
+ return self._reduce(
+ func,
+ name=name,
+ axis=axis,
+ skipna=skipna,
+ numeric_only=bool_only,
+ filter_type="bool",
+ )
+
+ def any(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
+ return self._logical_func(
+ "any", nanops.nanany, axis, bool_only, skipna, level, **kwargs
+ )
+
+ def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
+ return self._logical_func(
+ "all", nanops.nanall, axis, bool_only, skipna, level, **kwargs
+ )
+
+ def _accum_func(self, name: str, func, axis=None, skipna=True, *args, **kwargs):
+ skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
+ if axis is None:
+ axis = self._stat_axis_number
+ else:
+ axis = self._get_axis_number(axis)
+
+ if axis == 1:
+ return self.T._accum_func(
+ name, func, axis=0, skipna=skipna, *args, **kwargs
+ ).T
+
+ def block_accum_func(blk_values):
+ values = blk_values.T if hasattr(blk_values, "T") else blk_values
+
+ result = nanops.na_accum_func(values, func, skipna=skipna)
+
+ result = result.T if hasattr(result, "T") else result
+ return result
+
+ result = self._mgr.apply(block_accum_func)
+
+ return self._constructor(result).__finalize__(self, method=name)
+
+ def cummax(self, axis=None, skipna=True, *args, **kwargs):
+ return self._accum_func(
+ "cummax", np.maximum.accumulate, axis, skipna, *args, **kwargs
+ )
+
+ def cummin(self, axis=None, skipna=True, *args, **kwargs):
+ return self._accum_func(
+ "cummin", np.minimum.accumulate, axis, skipna, *args, **kwargs
+ )
+
+ def cumsum(self, axis=None, skipna=True, *args, **kwargs):
+ return self._accum_func("cumsum", np.cumsum, axis, skipna, *args, **kwargs)
+
+ def cumprod(self, axis=None, skipna=True, *args, **kwargs):
+ return self._accum_func("cumprod", np.cumprod, axis, skipna, *args, **kwargs)
+
+ def _stat_function_ddof(
+ self,
+ name: str,
+ func,
+ axis=None,
+ skipna=None,
+ level=None,
+ ddof=1,
+ numeric_only=None,
+ **kwargs,
+ ):
+ nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)
+ if skipna is None:
+ skipna = True
+ if axis is None:
+ axis = self._stat_axis_number
+ if level is not None:
+ return self._agg_by_level(
+ name, axis=axis, level=level, skipna=skipna, ddof=ddof
+ )
+ return self._reduce(
+ func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof
+ )
+
+ def sem(
+ self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
+ ):
+ return self._stat_function_ddof(
+ "sem", nanops.nansem, axis, skipna, level, ddof, numeric_only, **kwargs
+ )
+
+ def var(
+ self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
+ ):
+ return self._stat_function_ddof(
+ "var", nanops.nanvar, axis, skipna, level, ddof, numeric_only, **kwargs
+ )
+
+ def std(
+ self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
+ ):
+ return self._stat_function_ddof(
+ "std", nanops.nanstd, axis, skipna, level, ddof, numeric_only, **kwargs
+ )
+
+ def _stat_function(
+ self,
+ name: str,
+ func,
+ axis=None,
+ skipna=None,
+ level=None,
+ numeric_only=None,
+ **kwargs,
+ ):
+ if name == "median":
+ nv.validate_median(tuple(), kwargs)
+ else:
+ nv.validate_stat_func(tuple(), kwargs, fname=name)
+ if skipna is None:
+ skipna = True
+ if axis is None:
+ axis = self._stat_axis_number
+ if level is not None:
+ return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
+ return self._reduce(
+ func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only
+ )
+
+ def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
+ return self._stat_function(
+ "min", nanops.nanmin, axis, skipna, level, numeric_only, **kwargs
+ )
+
+ def max(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
+ return self._stat_function(
+ "max", nanops.nanmax, axis, skipna, level, numeric_only, **kwargs
+ )
+
+ def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
+ return self._stat_function(
+ "mean", nanops.nanmean, axis, skipna, level, numeric_only, **kwargs
+ )
+
+ def median(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
+ return self._stat_function(
+ "median", nanops.nanmedian, axis, skipna, level, numeric_only, **kwargs
+ )
+
+ def skew(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
+ return self._stat_function(
+ "skew", nanops.nanskew, axis, skipna, level, numeric_only, **kwargs
+ )
+
+ def kurt(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
+ return self._stat_function(
+ "kurt", nanops.nankurt, axis, skipna, level, numeric_only, **kwargs
+ )
+
+ kurtosis = kurt
+
+ def _min_count_stat_function(
+ self,
+ name: str,
+ func,
+ axis=None,
+ skipna=None,
+ level=None,
+ numeric_only=None,
+ min_count=0,
+ **kwargs,
+ ):
+ if name == "sum":
+ nv.validate_sum(tuple(), kwargs)
+ elif name == "prod":
+ nv.validate_prod(tuple(), kwargs)
+ else:
+ nv.validate_stat_func(tuple(), kwargs, fname=name)
+ if skipna is None:
+ skipna = True
+ if axis is None:
+ axis = self._stat_axis_number
+ if level is not None:
+ return self._agg_by_level(
+ name, axis=axis, level=level, skipna=skipna, min_count=min_count
+ )
+ return self._reduce(
+ func,
+ name=name,
+ axis=axis,
+ skipna=skipna,
+ numeric_only=numeric_only,
+ min_count=min_count,
+ )
+
+ def sum(
+ self,
+ axis=None,
+ skipna=None,
+ level=None,
+ numeric_only=None,
+ min_count=0,
+ **kwargs,
+ ):
+ return self._min_count_stat_function(
+ "sum", nanops.nansum, axis, skipna, level, numeric_only, min_count, **kwargs
+ )
+
+ def prod(
+ self,
+ axis=None,
+ skipna=None,
+ level=None,
+ numeric_only=None,
+ min_count=0,
+ **kwargs,
+ ):
+ return self._min_count_stat_function(
+ "prod",
+ nanops.nanprod,
+ axis,
+ skipna,
+ level,
+ numeric_only,
+ min_count,
+ **kwargs,
+ )
+
+ product = prod
+
+ def mad(self, axis=None, skipna=None, level=None):
+ """
+ {desc}
+
+ Parameters
+ ----------
+ axis : {axis_descr}
+ Axis for the function to be applied on.
+ skipna : bool, default None
+ Exclude NA/null values when computing the result.
+ level : int or level name, default None
+ If the axis is a MultiIndex (hierarchical), count along a
+ particular level, collapsing into a {name1}.
+
+ Returns
+ -------
+ {name1} or {name2} (if level specified)\
+ {see_also}\
+ {examples}
+ """
+ if skipna is None:
+ skipna = True
+ if axis is None:
+ axis = self._stat_axis_number
+ if level is not None:
+ return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna)
+
+ data = self._get_numeric_data()
+ if axis == 0:
+ demeaned = data - data.mean(axis=0)
+ else:
+ demeaned = data.sub(data.mean(axis=1), axis=0)
+ return np.abs(demeaned).mean(axis=axis, skipna=skipna)
+
@classmethod
def _add_numeric_operations(cls):
"""
@@ -10406,30 +10686,35 @@ def _add_numeric_operations(cls):
"""
axis_descr, name1, name2 = _doc_parms(cls)
- cls.any = _make_logical_function(
- cls,
- "any",
+ @doc(
+ _bool_doc,
+ desc=_any_desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
- desc=_any_desc,
- func=nanops.nanany,
see_also=_any_see_also,
examples=_any_examples,
empty_value=False,
)
- cls.all = _make_logical_function(
- cls,
- "all",
+ def any(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
+ return NDFrame.any(self, axis, bool_only, skipna, level, **kwargs)
+
+ cls.any = any
+
+ @doc(
+ _bool_doc,
+ desc=_all_desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
- desc=_all_desc,
- func=nanops.nanall,
see_also=_all_see_also,
examples=_all_examples,
empty_value=True,
)
+ def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
+ return NDFrame.all(self, axis, bool_only, skipna, level, **kwargs)
+
+ cls.all = all
@doc(
desc="Return the mean absolute deviation of the values "
@@ -10440,209 +10725,284 @@ def _add_numeric_operations(cls):
see_also="",
examples="",
)
+ @Appender(NDFrame.mad.__doc__)
def mad(self, axis=None, skipna=None, level=None):
- """
- {desc}
-
- Parameters
- ----------
- axis : {axis_descr}
- Axis for the function to be applied on.
- skipna : bool, default None
- Exclude NA/null values when computing the result.
- level : int or level name, default None
- If the axis is a MultiIndex (hierarchical), count along a
- particular level, collapsing into a {name1}.
-
- Returns
- -------
- {name1} or {name2} (if level specified)\
- {see_also}\
- {examples}
- """
- if skipna is None:
- skipna = True
- if axis is None:
- axis = self._stat_axis_number
- if level is not None:
- return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna)
-
- data = self._get_numeric_data()
- if axis == 0:
- demeaned = data - data.mean(axis=0)
- else:
- demeaned = data.sub(data.mean(axis=1), axis=0)
- return np.abs(demeaned).mean(axis=axis, skipna=skipna)
+ return NDFrame.mad(self, axis, skipna, level)
cls.mad = mad
- cls.sem = _make_stat_function_ddof(
- cls,
- "sem",
- name1=name1,
- name2=name2,
- axis_descr=axis_descr,
+ @doc(
+ _num_ddof_doc,
desc="Return unbiased standard error of the mean over requested "
"axis.\n\nNormalized by N-1 by default. This can be changed "
"using the ddof argument",
- func=nanops.nansem,
- )
- cls.var = _make_stat_function_ddof(
- cls,
- "var",
name1=name1,
name2=name2,
axis_descr=axis_descr,
+ )
+ def sem(
+ self,
+ axis=None,
+ skipna=None,
+ level=None,
+ ddof=1,
+ numeric_only=None,
+ **kwargs,
+ ):
+ return NDFrame.sem(self, axis, skipna, level, ddof, numeric_only, **kwargs)
+
+ cls.sem = sem
+
+ @doc(
+ _num_ddof_doc,
desc="Return unbiased variance over requested axis.\n\nNormalized by "
"N-1 by default. This can be changed using the ddof argument",
- func=nanops.nanvar,
- )
- cls.std = _make_stat_function_ddof(
- cls,
- "std",
name1=name1,
name2=name2,
axis_descr=axis_descr,
+ )
+ def var(
+ self,
+ axis=None,
+ skipna=None,
+ level=None,
+ ddof=1,
+ numeric_only=None,
+ **kwargs,
+ ):
+ return NDFrame.var(self, axis, skipna, level, ddof, numeric_only, **kwargs)
+
+ cls.var = var
+
+ @doc(
+ _num_ddof_doc,
desc="Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
"ddof argument",
- func=nanops.nanstd,
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
)
+ def std(
+ self,
+ axis=None,
+ skipna=None,
+ level=None,
+ ddof=1,
+ numeric_only=None,
+ **kwargs,
+ ):
+ return NDFrame.std(self, axis, skipna, level, ddof, numeric_only, **kwargs)
- cls.cummin = _make_cum_function(
- cls,
- "cummin",
+ cls.std = std
+
+ @doc(
+ _cnum_doc,
+ desc="minimum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
- desc="minimum",
- accum_func=np.minimum.accumulate,
accum_func_name="min",
examples=_cummin_examples,
)
- cls.cumsum = _make_cum_function(
- cls,
- "cumsum",
+ def cummin(self, axis=None, skipna=True, *args, **kwargs):
+ return NDFrame.cummin(self, axis, skipna, *args, **kwargs)
+
+ cls.cummin = cummin
+
+ @doc(
+ _cnum_doc,
+ desc="maximum",
name1=name1,
name2=name2,
axis_descr=axis_descr,
+ accum_func_name="max",
+ examples=_cummax_examples,
+ )
+ def cummax(self, axis=None, skipna=True, *args, **kwargs):
+ return NDFrame.cummax(self, axis, skipna, *args, **kwargs)
+
+ cls.cummax = cummax
+
+ @doc(
+ _cnum_doc,
desc="sum",
- accum_func=np.cumsum,
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
accum_func_name="sum",
examples=_cumsum_examples,
)
- cls.cumprod = _make_cum_function(
- cls,
- "cumprod",
+ def cumsum(self, axis=None, skipna=True, *args, **kwargs):
+ return NDFrame.cumsum(self, axis, skipna, *args, **kwargs)
+
+ cls.cumsum = cumsum
+
+ @doc(
+ _cnum_doc,
+ desc="product",
name1=name1,
name2=name2,
axis_descr=axis_descr,
- desc="product",
- accum_func=np.cumprod,
accum_func_name="prod",
examples=_cumprod_examples,
)
- cls.cummax = _make_cum_function(
- cls,
- "cummax",
- name1=name1,
- name2=name2,
- axis_descr=axis_descr,
- desc="maximum",
- accum_func=np.maximum.accumulate,
- accum_func_name="max",
- examples=_cummax_examples,
- )
+ def cumprod(self, axis=None, skipna=True, *args, **kwargs):
+ return NDFrame.cumprod(self, axis, skipna, *args, **kwargs)
+
+ cls.cumprod = cumprod
- cls.sum = _make_min_count_stat_function(
- cls,
- "sum",
+ @doc(
+ _num_doc,
+ desc="Return the sum of the values for the requested axis.\n\n"
+ "This is equivalent to the method ``numpy.sum``.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
- desc="Return the sum of the values for the requested axis.\n\n"
- "This is equivalent to the method ``numpy.sum``.",
- func=nanops.nansum,
+ min_count=_min_count_stub,
see_also=_stat_func_see_also,
examples=_sum_examples,
)
- cls.mean = _make_stat_function(
- cls,
- "mean",
+ def sum(
+ self,
+ axis=None,
+ skipna=None,
+ level=None,
+ numeric_only=None,
+ min_count=0,
+ **kwargs,
+ ):
+ return NDFrame.sum(
+ self, axis, skipna, level, numeric_only, min_count, **kwargs
+ )
+
+ cls.sum = sum
+
+ @doc(
+ _num_doc,
+ desc="Return the product of the values for the requested axis.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
- desc="Return the mean of the values for the requested axis.",
- func=nanops.nanmean,
+ min_count=_min_count_stub,
+ see_also=_stat_func_see_also,
+ examples=_prod_examples,
)
- cls.skew = _make_stat_function(
- cls,
- "skew",
+ def prod(
+ self,
+ axis=None,
+ skipna=None,
+ level=None,
+ numeric_only=None,
+ min_count=0,
+ **kwargs,
+ ):
+ return NDFrame.prod(
+ self, axis, skipna, level, numeric_only, min_count, **kwargs
+ )
+
+ cls.prod = prod
+ cls.product = prod
+
+ @doc(
+ _num_doc,
+ desc="Return the mean of the values for the requested axis.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
- desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.",
- func=nanops.nanskew,
+ min_count="",
+ see_also="",
+ examples="",
)
- cls.kurt = _make_stat_function(
- cls,
- "kurt",
+ def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
+ return NDFrame.mean(self, axis, skipna, level, numeric_only, **kwargs)
+
+ cls.mean = mean
+
+ @doc(
+ _num_doc,
+ desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.",
name1=name1,
name2=name2,
axis_descr=axis_descr,
+ min_count="",
+ see_also="",
+ examples="",
+ )
+ def skew(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
+ return NDFrame.skew(self, axis, skipna, level, numeric_only, **kwargs)
+
+ cls.skew = skew
+
+ @doc(
+ _num_doc,
desc="Return unbiased kurtosis over requested axis.\n\n"
"Kurtosis obtained using Fisher's definition of\n"
"kurtosis (kurtosis of normal == 0.0). Normalized "
"by N-1.",
- func=nanops.nankurt,
- )
- cls.kurtosis = cls.kurt
- cls.prod = _make_min_count_stat_function(
- cls,
- "prod",
name1=name1,
name2=name2,
axis_descr=axis_descr,
- desc="Return the product of the values for the requested axis.",
- func=nanops.nanprod,
- examples=_prod_examples,
+ min_count="",
+ see_also="",
+ examples="",
)
- cls.product = cls.prod
- cls.median = _make_stat_function(
- cls,
- "median",
- name1=name1,
- name2=name2,
- axis_descr=axis_descr,
+ def kurt(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
+ return NDFrame.kurt(self, axis, skipna, level, numeric_only, **kwargs)
+
+ cls.kurt = kurt
+ cls.kurtosis = kurt
+
+ @doc(
+ _num_doc,
desc="Return the median of the values for the requested axis.",
- func=nanops.nanmedian,
- )
- cls.max = _make_stat_function(
- cls,
- "max",
name1=name1,
name2=name2,
axis_descr=axis_descr,
+ min_count="",
+ see_also="",
+ examples="",
+ )
+ def median(
+ self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
+ ):
+ return NDFrame.median(self, axis, skipna, level, numeric_only, **kwargs)
+
+ cls.median = median
+
+ @doc(
+ _num_doc,
desc="Return the maximum of the values for the requested axis.\n\n"
"If you want the *index* of the maximum, use ``idxmax``. This is"
"the equivalent of the ``numpy.ndarray`` method ``argmax``.",
- func=nanops.nanmax,
- see_also=_stat_func_see_also,
- examples=_max_examples,
- )
- cls.min = _make_stat_function(
- cls,
- "min",
name1=name1,
name2=name2,
axis_descr=axis_descr,
+ min_count="",
+ see_also=_stat_func_see_also,
+ examples=_max_examples,
+ )
+ def max(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
+ return NDFrame.max(self, axis, skipna, level, numeric_only, **kwargs)
+
+ cls.max = max
+
+ @doc(
+ _num_doc,
desc="Return the minimum of the values for the requested axis.\n\n"
"If you want the *index* of the minimum, use ``idxmin``. This is"
"the equivalent of the ``numpy.ndarray`` method ``argmin``.",
- func=nanops.nanmin,
+ name1=name1,
+ name2=name2,
+ axis_descr=axis_descr,
+ min_count="",
see_also=_stat_func_see_also,
examples=_min_examples,
)
+ def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
+ return NDFrame.min(self, axis, skipna, level, numeric_only, **kwargs)
+
+ cls.min = min
@doc(Rolling)
def rolling(
@@ -11422,218 +11782,3 @@ def _doc_parms(cls):
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
"""
-
-
-def _make_min_count_stat_function(
- cls,
- name: str,
- name1: str,
- name2: str,
- axis_descr: str,
- desc: str,
- func: Callable,
- see_also: str = "",
- examples: str = "",
-) -> Callable:
- @doc(
- _num_doc,
- desc=desc,
- name1=name1,
- name2=name2,
- axis_descr=axis_descr,
- min_count=_min_count_stub,
- see_also=see_also,
- examples=examples,
- )
- def stat_func(
- self,
- axis=None,
- skipna=None,
- level=None,
- numeric_only=None,
- min_count=0,
- **kwargs,
- ):
- if name == "sum":
- nv.validate_sum(tuple(), kwargs)
- elif name == "prod":
- nv.validate_prod(tuple(), kwargs)
- else:
- nv.validate_stat_func(tuple(), kwargs, fname=name)
- if skipna is None:
- skipna = True
- if axis is None:
- axis = self._stat_axis_number
- if level is not None:
- return self._agg_by_level(
- name, axis=axis, level=level, skipna=skipna, min_count=min_count
- )
- return self._reduce(
- func,
- name=name,
- axis=axis,
- skipna=skipna,
- numeric_only=numeric_only,
- min_count=min_count,
- )
-
- return set_function_name(stat_func, name, cls)
-
-
-def _make_stat_function(
- cls,
- name: str,
- name1: str,
- name2: str,
- axis_descr: str,
- desc: str,
- func: Callable,
- see_also: str = "",
- examples: str = "",
-) -> Callable:
- @doc(
- _num_doc,
- desc=desc,
- name1=name1,
- name2=name2,
- axis_descr=axis_descr,
- min_count="",
- see_also=see_also,
- examples=examples,
- )
- def stat_func(
- self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
- ):
- if name == "median":
- nv.validate_median(tuple(), kwargs)
- else:
- nv.validate_stat_func(tuple(), kwargs, fname=name)
- if skipna is None:
- skipna = True
- if axis is None:
- axis = self._stat_axis_number
- if level is not None:
- return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
- return self._reduce(
- func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only
- )
-
- return set_function_name(stat_func, name, cls)
-
-
-def _make_stat_function_ddof(
- cls, name: str, name1: str, name2: str, axis_descr: str, desc: str, func: Callable
-) -> Callable:
- @doc(_num_ddof_doc, desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)
- def stat_func(
- self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
- ):
- nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)
- if skipna is None:
- skipna = True
- if axis is None:
- axis = self._stat_axis_number
- if level is not None:
- return self._agg_by_level(
- name, axis=axis, level=level, skipna=skipna, ddof=ddof
- )
- return self._reduce(
- func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof
- )
-
- return set_function_name(stat_func, name, cls)
-
-
-def _make_cum_function(
- cls,
- name: str,
- name1: str,
- name2: str,
- axis_descr: str,
- desc: str,
- accum_func: Callable,
- accum_func_name: str,
- examples: str,
-) -> Callable:
- @doc(
- _cnum_doc,
- desc=desc,
- name1=name1,
- name2=name2,
- axis_descr=axis_descr,
- accum_func_name=accum_func_name,
- examples=examples,
- )
- def cum_func(self, axis=None, skipna=True, *args, **kwargs):
- skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
- if axis is None:
- axis = self._stat_axis_number
- else:
- axis = self._get_axis_number(axis)
-
- if axis == 1:
- return cum_func(self.T, axis=0, skipna=skipna, *args, **kwargs).T
-
- def block_accum_func(blk_values):
- values = blk_values.T if hasattr(blk_values, "T") else blk_values
-
- result = nanops.na_accum_func(values, accum_func, skipna=skipna)
-
- result = result.T if hasattr(result, "T") else result
- return result
-
- result = self._mgr.apply(block_accum_func)
-
- return self._constructor(result).__finalize__(self, method=name)
-
- return set_function_name(cum_func, name, cls)
-
-
-def _make_logical_function(
- cls,
- name: str,
- name1: str,
- name2: str,
- axis_descr: str,
- desc: str,
- func: Callable,
- see_also: str,
- examples: str,
- empty_value: bool,
-) -> Callable:
- @doc(
- _bool_doc,
- desc=desc,
- name1=name1,
- name2=name2,
- axis_descr=axis_descr,
- see_also=see_also,
- examples=examples,
- empty_value=empty_value,
- )
- def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
- nv.validate_logical_func(tuple(), kwargs, fname=name)
- if level is not None:
- if bool_only is not None:
- raise NotImplementedError(
- "Option bool_only is not implemented with option level."
- )
- return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
-
- if self.ndim > 1 and axis is None:
- # Reduce along one dimension then the other, to simplify DataFrame._reduce
- res = logical_func(
- self, axis=0, bool_only=bool_only, skipna=skipna, **kwargs
- )
- return logical_func(res, skipna=skipna, **kwargs)
-
- return self._reduce(
- func,
- name=name,
- axis=axis,
- skipna=skipna,
- numeric_only=bool_only,
- filter_type="bool",
- )
-
- return set_function_name(logical_func, name, cls)
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
cc @simonjayhawkins along with #36989 this gets most of the generic.py mypy complaints (notwithstanding complaints about `cls.foo = foo` that can be addressed by changing to `setattr(cls, "foo", foo)`.
it'd be nice to have a less-verbose way of pinning the docstrings. i haven't had any luck so far at making that work. | https://api.github.com/repos/pandas-dev/pandas/pulls/37017 | 2020-10-10T01:42:41Z | 2020-10-10T16:28:38Z | 2020-10-10T16:28:38Z | 2020-10-10T16:31:15Z |
ERR: Better error message for MultiIndex.astype | diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 0604f70316cfb..9942ac35b1c8c 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -3606,8 +3606,8 @@ def astype(self, dtype, copy=True):
raise NotImplementedError(msg)
elif not is_object_dtype(dtype):
raise TypeError(
- f"Setting {type(self)} dtype to anything other "
- "than object is not supported"
+ "Setting a MultiIndex dtype to anything other than object "
+ "is not supported"
)
elif copy is True:
return self._shallow_copy()
| Error message could be nicer I think
```python
TypeError: Setting <class 'pandas.core.indexes.multi.MultiIndex'> dtype to anything other than object is not supported
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/37016 | 2020-10-09T22:48:18Z | 2020-10-10T02:30:05Z | 2020-10-10T02:30:05Z | 2020-10-10T02:36:19Z |
Call finalize in Series.str | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 0ab95dd260a9c..4bff94fd8476b 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -473,7 +473,7 @@ Other
- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` incorrectly raising ``AssertionError`` instead of ``ValueError`` when invalid parameter combinations are passed (:issue:`36045`)
- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` with numeric values and string ``to_replace`` (:issue:`34789`)
-- Fixed metadata propagation in the :class:`Series.dt` accessor (:issue:`28283`)
+- Fixed metadata propagation in the :class:`Series.dt` and :class:`Series.str` accessors (:issue:`28283`)
- Bug in :meth:`Index.union` behaving differently depending on whether operand is a :class:`Index` or other list-like (:issue:`36384`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py
index cae8cc1baf1df..12096034b54e4 100644
--- a/pandas/core/strings/accessor.py
+++ b/pandas/core/strings/accessor.py
@@ -247,6 +247,8 @@ def _wrap_result(
from pandas import Index, MultiIndex
if not hasattr(result, "ndim") or not hasattr(result, "dtype"):
+ if isinstance(result, ABCDataFrame):
+ result = result.__finalize__(self._orig, name="str")
return result
assert result.ndim < 3
@@ -324,6 +326,11 @@ def cons_row(x):
# Must be a Series
cons = self._orig._constructor
result = cons(result, name=name, index=index)
+ result = result.__finalize__(self._orig, method="str")
+ if name is not None and result.ndim == 1:
+ # __finalize__ might copy over the original name, but we may
+ # want the new name (e.g. str.extract).
+ result.name = name
return result
def _get_series_list(self, others):
@@ -597,6 +604,7 @@ def cat(self, others=None, sep=None, na_rep=None, join="left"):
else:
dtype = self._orig.dtype
result = Series(result, dtype=dtype, index=data.index, name=self._orig.name)
+ result = result.__finalize__(self._orig, method="str_cat")
return result
_shared_docs[
@@ -3034,7 +3042,8 @@ def str_extract(arr, pat, flags=0, expand=True):
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand:
- return _str_extract_frame(arr._orig, pat, flags=flags)
+ result = _str_extract_frame(arr._orig, pat, flags=flags)
+ return result.__finalize__(arr._orig, method="str_extract")
else:
result, name = _str_extract_noexpand(arr._orig, pat, flags=flags)
return arr._wrap_result(result, name=name, expand=expand)
diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py
index 6692102bc9008..25c926b1de4c6 100644
--- a/pandas/tests/generic/test_finalize.py
+++ b/pandas/tests/generic/test_finalize.py
@@ -598,22 +598,13 @@ def test_binops(args, annotate, all_arithmetic_functions):
[
operator.methodcaller("capitalize"),
operator.methodcaller("casefold"),
- pytest.param(
- operator.methodcaller("cat", ["a"]),
- marks=pytest.mark.xfail(reason="finalize not called."),
- ),
+ operator.methodcaller("cat", ["a"]),
operator.methodcaller("contains", "a"),
operator.methodcaller("count", "a"),
operator.methodcaller("encode", "utf-8"),
operator.methodcaller("endswith", "a"),
- pytest.param(
- operator.methodcaller("extract", r"(\w)(\d)"),
- marks=pytest.mark.xfail(reason="finalize not called."),
- ),
- pytest.param(
- operator.methodcaller("extract", r"(\w)(\d)"),
- marks=pytest.mark.xfail(reason="finalize not called."),
- ),
+ operator.methodcaller("extract", r"(\w)(\d)"),
+ operator.methodcaller("extract", r"(\w)(\d)", expand=False),
operator.methodcaller("find", "a"),
operator.methodcaller("findall", "a"),
operator.methodcaller("get", 0),
@@ -655,7 +646,6 @@ def test_binops(args, annotate, all_arithmetic_functions):
],
ids=idfn,
)
-@not_implemented_mark
def test_string_method(method):
s = pd.Series(["a1"])
s.attrs = {"a": 1}
| xref #28283
| https://api.github.com/repos/pandas-dev/pandas/pulls/37015 | 2020-10-09T20:56:04Z | 2020-10-10T22:25:59Z | 2020-10-10T22:25:59Z | 2020-10-11T20:09:39Z |
DOC: Clean merging.rst | diff --git a/doc/source/user_guide/merging.rst b/doc/source/user_guide/merging.rst
index eeac0ed4837dd..f1a28dc30dd68 100644
--- a/doc/source/user_guide/merging.rst
+++ b/doc/source/user_guide/merging.rst
@@ -76,9 +76,8 @@ a simple example:
:suppress:
@savefig merging_concat_basic.png
- p.plot(frames, result,
- labels=['df1', 'df2', 'df3'], vertical=True);
- plt.close('all');
+ p.plot(frames, result, labels=["df1", "df2", "df3"], vertical=True);
+ plt.close("all");
Like its sibling function on ndarrays, ``numpy.concatenate``, ``pandas.concat``
takes a list or dict of homogeneously-typed objects and concatenates them with
@@ -86,8 +85,17 @@ some configurable handling of "what to do with the other axes":
::
- pd.concat(objs, axis=0, join='outer', ignore_index=False, keys=None,
- levels=None, names=None, verify_integrity=False, copy=True)
+ pd.concat(
+ objs,
+ axis=0,
+ join="outer",
+ ignore_index=False,
+ keys=None,
+ levels=None,
+ names=None,
+ verify_integrity=False,
+ copy=True,
+ )
* ``objs`` : a sequence or mapping of Series or DataFrame objects. If a
dict is passed, the sorted keys will be used as the ``keys`` argument, unless
@@ -128,9 +136,8 @@ with each of the pieces of the chopped up DataFrame. We can do this using the
:suppress:
@savefig merging_concat_keys.png
- p.plot(frames, result,
- labels=['df1', 'df2', 'df3'], vertical=True)
- plt.close('all');
+ p.plot(frames, result, labels=["df1", "df2", "df3"], vertical=True)
+ plt.close("all");
As you can see (if you've read the rest of the documentation), the resulting
object's index has a :ref:`hierarchical index <advanced.hierarchical>`. This
@@ -194,9 +201,8 @@ behavior:
:suppress:
@savefig merging_concat_axis1.png
- p.plot([df1, df4], result,
- labels=['df1', 'df4'], vertical=False);
- plt.close('all');
+ p.plot([df1, df4], result, labels=["df1", "df4"], vertical=False);
+ plt.close("all");
.. warning::
@@ -215,9 +221,8 @@ Here is the same thing with ``join='inner'``:
:suppress:
@savefig merging_concat_axis1_inner.png
- p.plot([df1, df4], result,
- labels=['df1', 'df4'], vertical=False);
- plt.close('all');
+ p.plot([df1, df4], result, labels=["df1", "df4"], vertical=False);
+ plt.close("all");
Lastly, suppose we just wanted to reuse the *exact index* from the original
DataFrame:
@@ -236,9 +241,8 @@ Similarly, we could index before the concatenation:
:suppress:
@savefig merging_concat_axis1_join_axes.png
- p.plot([df1, df4], result,
- labels=['df1', 'df4'], vertical=False);
- plt.close('all');
+ p.plot([df1, df4], result, labels=["df1", "df4"], vertical=False);
+ plt.close("all");
.. _merging.concatenation:
@@ -257,9 +261,8 @@ instance methods on ``Series`` and ``DataFrame``. These methods actually predate
:suppress:
@savefig merging_append1.png
- p.plot([df1, df2], result,
- labels=['df1', 'df2'], vertical=True);
- plt.close('all');
+ p.plot([df1, df2], result, labels=["df1", "df2"], vertical=True);
+ plt.close("all");
In the case of ``DataFrame``, the indexes must be disjoint but the columns do not
need to be:
@@ -272,9 +275,8 @@ need to be:
:suppress:
@savefig merging_append2.png
- p.plot([df1, df4], result,
- labels=['df1', 'df4'], vertical=True);
- plt.close('all');
+ p.plot([df1, df4], result, labels=["df1", "df4"], vertical=True);
+ plt.close("all");
``append`` may take multiple objects to concatenate:
@@ -286,9 +288,8 @@ need to be:
:suppress:
@savefig merging_append3.png
- p.plot([df1, df2, df3], result,
- labels=['df1', 'df2', 'df3'], vertical=True);
- plt.close('all');
+ p.plot([df1, df2, df3], result, labels=["df1", "df2", "df3"], vertical=True);
+ plt.close("all");
.. note::
@@ -312,9 +313,8 @@ do this, use the ``ignore_index`` argument:
:suppress:
@savefig merging_concat_ignore_index.png
- p.plot([df1, df4], result,
- labels=['df1', 'df4'], vertical=True);
- plt.close('all');
+ p.plot([df1, df4], result, labels=["df1", "df4"], vertical=True);
+ plt.close("all");
This is also a valid argument to :meth:`DataFrame.append`:
@@ -326,9 +326,8 @@ This is also a valid argument to :meth:`DataFrame.append`:
:suppress:
@savefig merging_append_ignore_index.png
- p.plot([df1, df4], result,
- labels=['df1', 'df4'], vertical=True);
- plt.close('all');
+ p.plot([df1, df4], result, labels=["df1", "df4"], vertical=True);
+ plt.close("all");
.. _merging.mixed_ndims:
@@ -348,9 +347,8 @@ the name of the ``Series``.
:suppress:
@savefig merging_concat_mixed_ndim.png
- p.plot([df1, s1], result,
- labels=['df1', 's1'], vertical=False);
- plt.close('all');
+ p.plot([df1, s1], result, labels=["df1", "s1"], vertical=False);
+ plt.close("all");
.. note::
@@ -370,9 +368,8 @@ If unnamed ``Series`` are passed they will be numbered consecutively.
:suppress:
@savefig merging_concat_unnamed_series.png
- p.plot([df1, s2], result,
- labels=['df1', 's2'], vertical=False);
- plt.close('all');
+ p.plot([df1, s2], result, labels=["df1", "s2"], vertical=False);
+ plt.close("all");
Passing ``ignore_index=True`` will drop all name references.
@@ -384,9 +381,8 @@ Passing ``ignore_index=True`` will drop all name references.
:suppress:
@savefig merging_concat_series_ignore_index.png
- p.plot([df1, s1], result,
- labels=['df1', 's1'], vertical=False);
- plt.close('all');
+ p.plot([df1, s1], result, labels=["df1", "s1"], vertical=False);
+ plt.close("all");
More concatenating with group keys
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -420,9 +416,8 @@ Let's consider a variation of the very first example presented:
:suppress:
@savefig merging_concat_group_keys2.png
- p.plot(frames, result,
- labels=['df1', 'df2', 'df3'], vertical=True);
- plt.close('all');
+ p.plot(frames, result, labels=["df1", "df2", "df3"], vertical=True);
+ plt.close("all");
You can also pass a dict to ``concat`` in which case the dict keys will be used
for the ``keys`` argument (unless other keys are specified):
@@ -436,9 +431,8 @@ for the ``keys`` argument (unless other keys are specified):
:suppress:
@savefig merging_concat_dict.png
- p.plot([df1, df2, df3], result,
- labels=['df1', 'df2', 'df3'], vertical=True);
- plt.close('all');
+ p.plot([df1, df2, df3], result, labels=["df1", "df2", "df3"], vertical=True);
+ plt.close("all");
.. ipython:: python
@@ -448,9 +442,8 @@ for the ``keys`` argument (unless other keys are specified):
:suppress:
@savefig merging_concat_dict_keys.png
- p.plot([df1, df2, df3], result,
- labels=['df1', 'df2', 'df3'], vertical=True);
- plt.close('all');
+ p.plot([df1, df2, df3], result, labels=["df1", "df2", "df3"], vertical=True);
+ plt.close("all");
The MultiIndex created has levels that are constructed from the passed keys and
the index of the ``DataFrame`` pieces:
@@ -472,9 +465,8 @@ do so using the ``levels`` argument:
:suppress:
@savefig merging_concat_dict_keys_names.png
- p.plot([df1, df2, df3], result,
- labels=['df1', 'df2', 'df3'], vertical=True);
- plt.close('all');
+ p.plot([df1, df2, df3], result, labels=["df1", "df2", "df3"], vertical=True);
+ plt.close("all");
.. ipython:: python
@@ -501,9 +493,8 @@ append a single row to a ``DataFrame`` by passing a ``Series`` or dict to
:suppress:
@savefig merging_append_series_as_row.png
- p.plot([df1, s2], result,
- labels=['df1', 's2'], vertical=True);
- plt.close('all');
+ p.plot([df1, s2], result, labels=["df1", "s2"], vertical=True);
+ plt.close("all");
You should use ``ignore_index`` with this method to instruct DataFrame to
discard its index. If you wish to preserve the index, you should construct an
@@ -520,9 +511,8 @@ You can also pass a list of dicts or Series:
:suppress:
@savefig merging_append_dits.png
- p.plot([df1, pd.DataFrame(dicts)], result,
- labels=['df1', 'dicts'], vertical=True);
- plt.close('all');
+ p.plot([df1, pd.DataFrame(dicts)], result, labels=["df1", "dicts"], vertical=True);
+ plt.close("all");
.. _merging.join:
@@ -546,10 +536,21 @@ all standard database join operations between ``DataFrame`` or named ``Series``
::
- pd.merge(left, right, how='inner', on=None, left_on=None, right_on=None,
- left_index=False, right_index=False, sort=True,
- suffixes=('_x', '_y'), copy=True, indicator=False,
- validate=None)
+ pd.merge(
+ left,
+ right,
+ how="inner",
+ on=None,
+ left_on=None,
+ right_on=None,
+ left_index=False,
+ right_index=False,
+ sort=True,
+ suffixes=("_x", "_y"),
+ copy=True,
+ indicator=False,
+ validate=None,
+ )
* ``left``: A DataFrame or named Series object.
* ``right``: Another DataFrame or named Series object.
@@ -664,9 +665,8 @@ key combination:
:suppress:
@savefig merging_merge_on_key.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
Here is a more complicated example with multiple join keys. Only the keys
appearing in ``left`` and ``right`` are present (the intersection), since
@@ -698,9 +698,8 @@ appearing in ``left`` and ``right`` are present (the intersection), since
:suppress:
@savefig merging_merge_on_key_multiple.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
The ``how`` argument to ``merge`` specifies how to determine which keys are to
be included in the resulting table. If a key combination **does not appear** in
@@ -724,9 +723,8 @@ either the left or right tables, the values in the joined table will be
:suppress:
@savefig merging_merge_on_key_left.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
.. ipython:: python
@@ -736,8 +734,7 @@ either the left or right tables, the values in the joined table will be
:suppress:
@savefig merging_merge_on_key_right.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
.. ipython:: python
@@ -747,9 +744,8 @@ either the left or right tables, the values in the joined table will be
:suppress:
@savefig merging_merge_on_key_outer.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
.. ipython:: python
@@ -759,9 +755,8 @@ either the left or right tables, the values in the joined table will be
:suppress:
@savefig merging_merge_on_key_inner.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
You can merge a mult-indexed Series and a DataFrame, if the names of
the MultiIndex correspond to the columns from the DataFrame. Transform
@@ -798,9 +793,8 @@ Here is another example with duplicate join keys in DataFrames:
:suppress:
@savefig merging_merge_on_key_dup.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
.. warning::
@@ -829,7 +823,7 @@ In the following example, there are duplicate values of ``B`` in the right
.. code-block:: ipython
- In [53]: result = pd.merge(left, right, on='B', how='outer', validate="one_to_one")
+ In [53]: result = pd.merge(left, right, on="B", how="outer", validate="one_to_one")
...
MergeError: Merge keys are not unique in right dataset; not a one-to-one merge
@@ -973,9 +967,8 @@ potentially differently-indexed ``DataFrames`` into a single result
:suppress:
@savefig merging_join.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
.. ipython:: python
@@ -985,9 +978,8 @@ potentially differently-indexed ``DataFrames`` into a single result
:suppress:
@savefig merging_join_outer.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
The same as above, but with ``how='inner'``.
@@ -999,9 +991,8 @@ The same as above, but with ``how='inner'``.
:suppress:
@savefig merging_join_inner.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
The data alignment here is on the indexes (row labels). This same behavior can
be achieved using ``merge`` plus additional arguments instructing it to use the
@@ -1015,9 +1006,8 @@ indexes:
:suppress:
@savefig merging_merge_index_outer.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
.. ipython:: python
@@ -1027,9 +1017,8 @@ indexes:
:suppress:
@savefig merging_merge_index_inner.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
Joining key columns on an index
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1042,8 +1031,9 @@ completely equivalent:
::
left.join(right, on=key_or_keys)
- pd.merge(left, right, left_on=key_or_keys, right_index=True,
- how='left', sort=False)
+ pd.merge(
+ left, right, left_on=key_or_keys, right_index=True, how="left", sort=False
+ )
Obviously you can choose whichever form you find more convenient. For
many-to-one joins (where one of the ``DataFrame``'s is already indexed by the
@@ -1067,9 +1057,8 @@ join key), using ``join`` may be more convenient. Here is a simple example:
:suppress:
@savefig merging_join_key_columns.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
.. ipython:: python
@@ -1081,9 +1070,8 @@ join key), using ``join`` may be more convenient. Here is a simple example:
:suppress:
@savefig merging_merge_key_columns.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
.. _merging.multikey_join:
@@ -1117,9 +1105,8 @@ Now this can be joined by passing the two key column names:
:suppress:
@savefig merging_join_multikeys.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
.. _merging.df_inner_join:
@@ -1136,9 +1123,8 @@ easily performed:
:suppress:
@savefig merging_join_multikeys_inner.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
As you can see, this drops any rows where there was no match.
@@ -1153,41 +1139,44 @@ a level name of the MultiIndexed frame.
.. ipython:: python
- left = pd.DataFrame({'A': ['A0', 'A1', 'A2'],
- 'B': ['B0', 'B1', 'B2']},
- index=pd.Index(['K0', 'K1', 'K2'], name='key'))
+ left = pd.DataFrame(
+ {"A": ["A0", "A1", "A2"], "B": ["B0", "B1", "B2"]},
+ index=pd.Index(["K0", "K1", "K2"], name="key"),
+ )
- index = pd.MultiIndex.from_tuples([('K0', 'Y0'), ('K1', 'Y1'),
- ('K2', 'Y2'), ('K2', 'Y3')],
- names=['key', 'Y'])
- right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'],
- 'D': ['D0', 'D1', 'D2', 'D3']},
- index=index)
+ index = pd.MultiIndex.from_tuples(
+ [("K0", "Y0"), ("K1", "Y1"), ("K2", "Y2"), ("K2", "Y3")],
+ names=["key", "Y"],
+ )
+ right = pd.DataFrame(
+ {"C": ["C0", "C1", "C2", "C3"], "D": ["D0", "D1", "D2", "D3"]},
+ index=index,
+ )
+
+ result = left.join(right, how="inner")
- result = left.join(right, how='inner')
.. ipython:: python
:suppress:
@savefig merging_join_multiindex_inner.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
This is equivalent but less verbose and more memory efficient / faster than this.
.. ipython:: python
- result = pd.merge(left.reset_index(), right.reset_index(),
- on=['key'], how='inner').set_index(['key','Y'])
+ result = pd.merge(
+ left.reset_index(), right.reset_index(), on=["key"], how="inner"
+ ).set_index(["key","Y"])
.. ipython:: python
:suppress:
@savefig merging_merge_multiindex_alternative.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
.. _merging.join_with_two_multi_indexes:
@@ -1241,9 +1230,8 @@ done using the following code.
:suppress:
@savefig merging_merge_two_multiindex.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
.. _merging.merge_on_columns_and_levels:
@@ -1285,9 +1273,8 @@ resetting indexes.
:suppress:
@savefig merge_on_index_and_column.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
.. note::
@@ -1325,9 +1312,8 @@ columns:
:suppress:
@savefig merging_merge_overlapped.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
.. ipython:: python
@@ -1337,9 +1323,8 @@ columns:
:suppress:
@savefig merging_merge_overlapped_suffix.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
:meth:`DataFrame.join` has ``lsuffix`` and ``rsuffix`` arguments which behave
similarly.
@@ -1354,9 +1339,8 @@ similarly.
:suppress:
@savefig merging_merge_overlapped_multi_suffix.png
- p.plot([left, right], result,
- labels=['left', 'right'], vertical=False);
- plt.close('all');
+ p.plot([left, right], result, labels=["left", "right"], vertical=False);
+ plt.close("all");
.. _merging.multiple_join:
@@ -1375,9 +1359,13 @@ to join them together on their indexes.
:suppress:
@savefig merging_join_multi_df.png
- p.plot([left, right, right2], result,
- labels=['left', 'right', 'right2'], vertical=False);
- plt.close('all');
+ p.plot(
+ [left, right, right2],
+ result,
+ labels=["left", "right", "right2"],
+ vertical=False,
+ );
+ plt.close("all");
.. _merging.combine_first.update:
@@ -1405,9 +1393,8 @@ For this, use the :meth:`~DataFrame.combine_first` method:
:suppress:
@savefig merging_combine_first.png
- p.plot([df1, df2], result,
- labels=['df1', 'df2'], vertical=False);
- plt.close('all');
+ p.plot([df1, df2], result, labels=["df1", "df2"], vertical=False);
+ plt.close("all");
Note that this method only takes values from the right ``DataFrame`` if they are
missing in the left ``DataFrame``. A related method, :meth:`~DataFrame.update`,
@@ -1426,9 +1413,8 @@ alters non-NA values in place:
:suppress:
@savefig merging_update.png
- p.plot([df1_copy, df2], df1,
- labels=['df1', 'df2'], vertical=False);
- plt.close('all');
+ p.plot([df1_copy, df2], df1, labels=["df1", "df2"], vertical=False);
+ plt.close("all");
.. _merging.time_series:
| Closes #36777 | https://api.github.com/repos/pandas-dev/pandas/pulls/37014 | 2020-10-09T20:45:44Z | 2020-10-10T22:24:29Z | 2020-10-10T22:24:28Z | 2020-10-10T22:46:54Z |
DOC: pandas.Index.astype says it raises ValueError instead of TypeError | diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 539f5515a2f8b..5056b5b82755c 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -652,7 +652,7 @@ def astype(self, dtype, copy=True):
Create an Index with values cast to dtypes.
The class of a new Index is determined by dtype. When conversion is
- impossible, a ValueError exception is raised.
+ impossible, a TypeError exception is raised.
Parameters
----------
| - [X] closes #37012
- [X] passes pytest
- [X] passes `black pandas`
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/37013 | 2020-10-09T20:36:34Z | 2020-10-10T07:40:53Z | 2020-10-10T07:40:52Z | 2020-10-10T07:41:00Z |
REF/TYP: consistent return type for Block.replace | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index fcc923c97cf83..ffd6de53e2120 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -705,7 +705,7 @@ def replace(
inplace: bool = False,
regex: bool = False,
convert: bool = True,
- ):
+ ) -> List["Block"]:
"""
replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
@@ -796,9 +796,11 @@ def replace(
)
return blocks
- def _replace_single(self, *args, **kwargs):
+ def _replace_single(
+ self, to_replace, value, inplace=False, regex=False, convert=True, mask=None
+ ) -> List["Block"]:
""" no-op on a non-ObjectBlock """
- return self if kwargs["inplace"] else self.copy()
+ return [self] if inplace else [self.copy()]
def _replace_list(
self,
@@ -840,16 +842,13 @@ def comp(s: Scalar, mask: np.ndarray, regex: bool = False) -> np.ndarray:
to_replace=src,
value=dest,
inplace=inplace,
- convert=convert,
regex=regex,
)
- if m.any() or convert:
- if isinstance(result, list):
- new_rb.extend(result)
- else:
- new_rb.append(result)
- else:
- new_rb.append(blk)
+ if convert and blk.is_object:
+ result = extend_blocks(
+ [b.convert(numeric=False, copy=True) for b in result]
+ )
+ new_rb.extend(result)
rb = new_rb
return rb
@@ -1547,9 +1546,8 @@ def _replace_coerce(
value,
inplace: bool = True,
regex: bool = False,
- convert: bool = False,
mask=None,
- ):
+ ) -> List["Block"]:
"""
Replace value corresponding to the given boolean array with another
value.
@@ -1564,14 +1562,12 @@ def _replace_coerce(
Perform inplace modification.
regex : bool, default False
If true, perform regular expression substitution.
- convert : bool, default True
- If true, try to coerce any object types to better types.
mask : array-like of bool, optional
True indicate corresponding element is ignored.
Returns
-------
- A new block if there is anything to replace or the original block.
+ List[Block]
"""
if mask.any():
if not regex:
@@ -1583,10 +1579,10 @@ def _replace_coerce(
value,
inplace=inplace,
regex=regex,
- convert=convert,
+ convert=False,
mask=mask,
)
- return self
+ return [self]
class ExtensionBlock(Block):
@@ -2488,14 +2484,16 @@ def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"]
def _can_hold_element(self, element: Any) -> bool:
return True
- def replace(self, to_replace, value, inplace=False, regex=False, convert=True):
+ def replace(
+ self, to_replace, value, inplace=False, regex=False, convert=True
+ ) -> List["Block"]:
to_rep_is_list = is_list_like(to_replace)
value_is_list = is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
- result_blocks = []
- blocks = [self]
+ result_blocks: List["Block"] = []
+ blocks: List["Block"] = [self]
if not either_list and is_re(to_replace):
return self._replace_single(
@@ -2512,7 +2510,7 @@ def replace(self, to_replace, value, inplace=False, regex=False, convert=True):
result = b._replace_single(
to_rep, v, inplace=inplace, regex=regex, convert=convert
)
- result_blocks = extend_blocks(result, result_blocks)
+ result_blocks.extend(result)
blocks = result_blocks
return result_blocks
@@ -2523,7 +2521,7 @@ def replace(self, to_replace, value, inplace=False, regex=False, convert=True):
result = b._replace_single(
to_rep, value, inplace=inplace, regex=regex, convert=convert
)
- result_blocks = extend_blocks(result, result_blocks)
+ result_blocks.extend(result)
blocks = result_blocks
return result_blocks
@@ -2533,7 +2531,7 @@ def replace(self, to_replace, value, inplace=False, regex=False, convert=True):
def _replace_single(
self, to_replace, value, inplace=False, regex=False, convert=True, mask=None
- ):
+ ) -> List["Block"]:
"""
Replace elements by the given value.
@@ -2554,7 +2552,7 @@ def _replace_single(
Returns
-------
- a new block, the result after replacing
+ List[Block]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
@@ -2628,48 +2626,6 @@ def re_replacer(s):
nbs = [block]
return nbs
- def _replace_coerce(
- self, to_replace, value, inplace=True, regex=False, convert=False, mask=None
- ):
- """
- Replace value corresponding to the given boolean array with another
- value.
-
- Parameters
- ----------
- to_replace : object or pattern
- Scalar to replace or regular expression to match.
- value : object
- Replacement object.
- inplace : bool, default False
- Perform inplace modification.
- regex : bool, default False
- If true, perform regular expression substitution.
- convert : bool, default True
- If true, try to coerce any object types to better types.
- mask : array-like of bool, optional
- True indicate corresponding element is ignored.
-
- Returns
- -------
- A new block if there is anything to replace or the original block.
- """
- if mask.any():
- nbs = super()._replace_coerce(
- to_replace=to_replace,
- value=value,
- inplace=inplace,
- regex=regex,
- convert=convert,
- mask=mask,
- )
- if convert:
- nbs = extend_blocks([b.convert(numeric=False, copy=True) for b in nbs])
- return nbs
- if convert:
- return self.convert(numeric=False, copy=True)
- return [self]
-
class CategoricalBlock(ExtensionBlock):
__slots__ = ()
@@ -2681,12 +2637,12 @@ def replace(
inplace: bool = False,
regex: bool = False,
convert: bool = True,
- ):
+ ) -> List["Block"]:
inplace = validate_bool_kwarg(inplace, "inplace")
result = self if inplace else self.copy()
result.values.replace(to_replace, value, inplace=True)
- return result
+ return [result]
# -----------------------------------------------------------------
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
move some convert logic out of _replace_coerce to avoid an otherwise-duplicated method | https://api.github.com/repos/pandas-dev/pandas/pulls/37010 | 2020-10-09T19:13:07Z | 2020-10-10T17:42:31Z | 2020-10-10T17:42:31Z | 2020-10-10T18:08:15Z |
CLN: standardize values coercion in Blocks | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index fcc923c97cf83..be105f0035447 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -124,7 +124,7 @@ def _simple_new(
def __init__(self, values, placement, ndim=None):
self.ndim = self._check_ndim(values, ndim)
self.mgr_locs = placement
- self.values = values
+ self.values = self._maybe_coerce_values(values)
if self._validate_ndim and self.ndim and len(self.mgr_locs) != len(self.values):
raise ValueError(
@@ -132,6 +132,20 @@ def __init__(self, values, placement, ndim=None):
f"placement implies {len(self.mgr_locs)}"
)
+ def _maybe_coerce_values(self, values):
+ """
+ Ensure we have correctly-typed values.
+
+ Parameters
+ ----------
+ values : np.ndarray, ExtensionArray, Index
+
+ Returns
+ -------
+ np.ndarray or ExtensionArray
+ """
+ return values
+
def _check_ndim(self, values, ndim):
"""
ndim inference and validation.
@@ -1614,7 +1628,6 @@ def __init__(self, values, placement, ndim=None):
This will call continue to call __init__ for the other base
classes mixed in with this Mixin.
"""
- values = self._maybe_coerce_values(values)
# Placement must be converted to BlockPlacement so that we can check
# its length
@@ -2109,10 +2122,6 @@ class DatetimeBlock(DatetimeLikeBlockMixin, Block):
__slots__ = ()
is_datetime = True
- def __init__(self, values, placement, ndim=None):
- values = self._maybe_coerce_values(values)
- super().__init__(values, placement=placement, ndim=ndim)
-
@property
def _can_hold_na(self):
return True
@@ -2366,14 +2375,14 @@ class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock):
is_numeric = False
fill_value = np.timedelta64("NaT", "ns")
- def __init__(self, values, placement, ndim=None):
+ def _maybe_coerce_values(self, values):
if values.dtype != TD64NS_DTYPE:
# e.g. non-nano or int64
values = TimedeltaArray._from_sequence(values)._data
if isinstance(values, TimedeltaArray):
values = values._data
assert isinstance(values, np.ndarray), type(values)
- super().__init__(values, placement=placement, ndim=ndim)
+ return values
@property
def _holder(self):
@@ -2426,11 +2435,10 @@ class ObjectBlock(Block):
is_object = True
_can_hold_na = True
- def __init__(self, values, placement=None, ndim=2):
+ def _maybe_coerce_values(self, values):
if issubclass(values.dtype.type, str):
values = np.array(values, dtype=object)
-
- super().__init__(values, ndim=ndim, placement=placement)
+ return values
@property
def is_bool(self):
| - [x] closes #19492
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/37009 | 2020-10-09T16:39:36Z | 2020-10-09T21:13:01Z | 2020-10-09T21:13:01Z | 2020-10-09T21:25:54Z |
Add pandas-genomics to the ecosystem list | diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index 8f04d05cfcb04..25ca77627ef39 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -435,6 +435,11 @@ found in NumPy or pandas, which work well with pandas' data containers.
Cyberpandas provides an extension type for storing arrays of IP Addresses. These
arrays can be stored inside pandas' Series and DataFrame.
+`Pandas-Genomics`_
+~~~~~~~~~~~~~~~~~~
+
+Pandas-Genomics provides extension types and extension arrays for working with genomics data
+
`Pint-Pandas`_
~~~~~~~~~~~~~~
@@ -465,6 +470,7 @@ Library Accessor Classes Description
.. _cyberpandas: https://cyberpandas.readthedocs.io/en/latest
.. _pdvega: https://altair-viz.github.io/pdvega/
.. _Altair: https://altair-viz.github.io/
+.. _pandas-genomics: https://pandas-genomics.readthedocs.io/en/latest/
.. _pandas_path: https://github.com/drivendataorg/pandas-path/
.. _pathlib.Path: https://docs.python.org/3/library/pathlib.html
.. _pint-pandas: https://github.com/hgrecco/pint-pandas
| - [x] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Added [as suggested here](https://github.com/pandas-dev/pandas/pull/36987#pullrequestreview-505157583). | https://api.github.com/repos/pandas-dev/pandas/pulls/37008 | 2020-10-09T16:31:31Z | 2020-10-10T17:45:43Z | 2020-10-10T17:45:42Z | 2020-10-10T17:45:46Z |
Backport PR #36950 on branch 1.1.x (REGR: Allow positional arguments in DataFrame.agg) | diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst
index f9127ee8d13e7..3ad8d981be2c9 100644
--- a/doc/source/whatsnew/v1.1.4.rst
+++ b/doc/source/whatsnew/v1.1.4.rst
@@ -15,6 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression where attempting to mutate a :class:`DateOffset` object would no longer raise an ``AttributeError`` (:issue:`36940`)
+- Fixed regression where :meth:`DataFrame.agg` would fail with :exc:`TypeError` when passed positional arguments to be passed on to the aggregation function (:issue:`36948`).
- Fixed regression in :class:`RollingGroupby` with ``sort=False`` not being respected (:issue:`36889`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 0cbcb0ce3d700..f5dbb77d16165 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7359,7 +7359,7 @@ def aggregate(self, func=None, axis=0, *args, **kwargs):
result = None
try:
- result, how = self._aggregate(func, axis=axis, *args, **kwargs)
+ result, how = self._aggregate(func, axis, *args, **kwargs)
except TypeError as err:
exc = TypeError(
"DataFrame constructor called with "
diff --git a/pandas/tests/frame/apply/test_frame_apply.py b/pandas/tests/frame/apply/test_frame_apply.py
index 1657abcc96d76..a89a20fc69ef8 100644
--- a/pandas/tests/frame/apply/test_frame_apply.py
+++ b/pandas/tests/frame/apply/test_frame_apply.py
@@ -1480,6 +1480,34 @@ def test_agg_cython_table_raises(self, df, func, expected, axis):
with pytest.raises(expected, match=msg):
df.agg(func, axis=axis)
+ @pytest.mark.parametrize("axis", [0, 1])
+ @pytest.mark.parametrize(
+ "args, kwargs",
+ [
+ ((1, 2, 3), {}),
+ ((8, 7, 15), {}),
+ ((1, 2), {}),
+ ((1,), {"b": 2}),
+ ((), {"a": 1, "b": 2}),
+ ((), {"a": 2, "b": 1}),
+ ((), {"a": 1, "b": 2, "c": 3}),
+ ],
+ )
+ def test_agg_args_kwargs(self, axis, args, kwargs):
+ def f(x, a, b, c=3):
+ return x.sum() + (a + b) / c
+
+ df = pd.DataFrame([[1, 2], [3, 4]])
+
+ if axis == 0:
+ expected = pd.Series([5.0, 7.0])
+ else:
+ expected = pd.Series([4.0, 8.0])
+
+ result = df.agg(f, axis, *args, **kwargs)
+
+ tm.assert_series_equal(result, expected)
+
@pytest.mark.parametrize("num_cols", [2, 3, 5])
def test_frequency_is_original(self, num_cols):
# GH 22150
| Backport PR #36950: REGR: Allow positional arguments in DataFrame.agg | https://api.github.com/repos/pandas-dev/pandas/pulls/37005 | 2020-10-09T13:35:24Z | 2020-10-09T14:35:07Z | 2020-10-09T14:35:07Z | 2020-10-09T14:35:07Z |
CI: isort fixup on 1.1.x | diff --git a/pandas/_libs/hashtable.pxd b/pandas/_libs/hashtable.pxd
index 0499eabf708af..eebf89d9650e5 100644
--- a/pandas/_libs/hashtable.pxd
+++ b/pandas/_libs/hashtable.pxd
@@ -1,8 +1,16 @@
-from pandas._libs.khash cimport (
- kh_int64_t, kh_uint64_t, kh_float64_t, kh_pymap_t, kh_str_t, uint64_t,
- int64_t, float64_t)
from numpy cimport ndarray
+from pandas._libs.khash cimport (
+ float64_t,
+ int64_t,
+ kh_float64_t,
+ kh_int64_t,
+ kh_pymap_t,
+ kh_str_t,
+ kh_uint64_t,
+ uint64_t,
+)
+
# prototypes for sharing
cdef class HashTable:
diff --git a/pandas/_libs/khash.pxd b/pandas/_libs/khash.pxd
index b5fe73df5d9be..1bb3a158b4b1a 100644
--- a/pandas/_libs/khash.pxd
+++ b/pandas/_libs/khash.pxd
@@ -1,5 +1,6 @@
from cpython.object cimport PyObject
-from numpy cimport int64_t, uint64_t, int32_t, uint32_t, float64_t
+from numpy cimport float64_t, int32_t, int64_t, uint32_t, uint64_t
+
cdef extern from "khash_python.h":
ctypedef uint32_t khint_t
diff --git a/pandas/_libs/missing.pxd b/pandas/_libs/missing.pxd
index 090c5c5173280..e02b84381b62c 100644
--- a/pandas/_libs/missing.pxd
+++ b/pandas/_libs/missing.pxd
@@ -1,5 +1,6 @@
from numpy cimport ndarray, uint8_t
+
cpdef bint checknull(object val)
cpdef bint checknull_old(object val)
cpdef ndarray[uint8_t] isnaobj(ndarray arr)
diff --git a/pandas/_libs/tslibs/ccalendar.pxd b/pandas/_libs/tslibs/ccalendar.pxd
index 4eb5188b8a04b..388fd0c62b937 100644
--- a/pandas/_libs/tslibs/ccalendar.pxd
+++ b/pandas/_libs/tslibs/ccalendar.pxd
@@ -1,6 +1,5 @@
from cython cimport Py_ssize_t
-
-from numpy cimport int64_t, int32_t
+from numpy cimport int32_t, int64_t
ctypedef (int32_t, int32_t, int32_t) iso_calendar_t
diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd
index 73772e5ab4577..31a6862be5fbf 100644
--- a/pandas/_libs/tslibs/conversion.pxd
+++ b/pandas/_libs/tslibs/conversion.pxd
@@ -1,6 +1,5 @@
from cpython.datetime cimport datetime, tzinfo
-
-from numpy cimport int64_t, int32_t, ndarray
+from numpy cimport int32_t, int64_t, ndarray
from pandas._libs.tslibs.np_datetime cimport npy_datetimestruct
diff --git a/pandas/_libs/tslibs/nattype.pxd b/pandas/_libs/tslibs/nattype.pxd
index 3f7240654d7e8..d38f4518f9bf0 100644
--- a/pandas/_libs/tslibs/nattype.pxd
+++ b/pandas/_libs/tslibs/nattype.pxd
@@ -1,6 +1,7 @@
from cpython.datetime cimport datetime
-
from numpy cimport int64_t
+
+
cdef int64_t NPY_NAT
cdef bint _nat_scalar_rules[6]
diff --git a/pandas/_libs/tslibs/np_datetime.pxd b/pandas/_libs/tslibs/np_datetime.pxd
index eebdcb3ace507..b2524c6bc6c0d 100644
--- a/pandas/_libs/tslibs/np_datetime.pxd
+++ b/pandas/_libs/tslibs/np_datetime.pxd
@@ -1,6 +1,6 @@
from cpython.datetime cimport date, datetime
+from numpy cimport int32_t, int64_t
-from numpy cimport int64_t, int32_t
cdef extern from "numpy/ndarrayobject.h":
ctypedef int64_t npy_timedelta
diff --git a/pandas/_libs/tslibs/offsets.pxd b/pandas/_libs/tslibs/offsets.pxd
index 9a9244db4a565..215c3f849281f 100644
--- a/pandas/_libs/tslibs/offsets.pxd
+++ b/pandas/_libs/tslibs/offsets.pxd
@@ -1,5 +1,6 @@
from numpy cimport int64_t
+
cpdef to_offset(object obj)
cdef bint is_offset_object(object obj)
cdef bint is_tick_object(object obj)
diff --git a/pandas/_libs/tslibs/period.pxd b/pandas/_libs/tslibs/period.pxd
index 9c0342e239a89..46c6e52cb9156 100644
--- a/pandas/_libs/tslibs/period.pxd
+++ b/pandas/_libs/tslibs/period.pxd
@@ -2,5 +2,6 @@ from numpy cimport int64_t
from .np_datetime cimport npy_datetimestruct
+
cdef bint is_period_object(object obj)
cdef int64_t get_period_ordinal(npy_datetimestruct *dts, int freq) nogil
diff --git a/pandas/_libs/tslibs/timedeltas.pxd b/pandas/_libs/tslibs/timedeltas.pxd
index 4142861e9ad38..fed1f2d326819 100644
--- a/pandas/_libs/tslibs/timedeltas.pxd
+++ b/pandas/_libs/tslibs/timedeltas.pxd
@@ -1,6 +1,7 @@
from cpython.datetime cimport timedelta
from numpy cimport int64_t
+
# Exposed for tslib, not intended for outside use.
cpdef int64_t delta_to_nanoseconds(delta) except? -1
cdef convert_to_timedelta64(object ts, str unit)
diff --git a/pandas/_libs/tslibs/timestamps.pxd b/pandas/_libs/tslibs/timestamps.pxd
index 307b6dfc90715..755cf3fc940b8 100644
--- a/pandas/_libs/tslibs/timestamps.pxd
+++ b/pandas/_libs/tslibs/timestamps.pxd
@@ -1,5 +1,4 @@
from cpython.datetime cimport datetime, tzinfo
-
from numpy cimport int64_t
from pandas._libs.tslibs.base cimport ABCTimestamp
diff --git a/pandas/_libs/tslibs/timezones.pxd b/pandas/_libs/tslibs/timezones.pxd
index 136710003d32a..753c881ed505c 100644
--- a/pandas/_libs/tslibs/timezones.pxd
+++ b/pandas/_libs/tslibs/timezones.pxd
@@ -1,5 +1,6 @@
from cpython.datetime cimport datetime, timedelta, tzinfo
+
cdef tzinfo utc_pytz
cpdef bint is_utc(tzinfo tz)
diff --git a/pandas/_libs/tslibs/util.pxd b/pandas/_libs/tslibs/util.pxd
index e280609bb17a7..b5e89e5ed3ef4 100644
--- a/pandas/_libs/tslibs/util.pxd
+++ b/pandas/_libs/tslibs/util.pxd
@@ -1,6 +1,7 @@
from cpython.object cimport PyTypeObject
+
cdef extern from *:
"""
PyObject* char_to_string(const char* data) {
@@ -26,7 +27,8 @@ cdef extern from "Python.h":
const char* PyUnicode_AsUTF8AndSize(object obj,
Py_ssize_t* length) except NULL
-from numpy cimport int64_t, float64_t
+from numpy cimport float64_t, int64_t
+
cdef extern from "numpy/arrayobject.h":
PyTypeObject PyFloatingArrType_Type
diff --git a/pandas/_libs/util.pxd b/pandas/_libs/util.pxd
index 828bccf7d5641..7394605722103 100644
--- a/pandas/_libs/util.pxd
+++ b/pandas/_libs/util.pxd
@@ -1,8 +1,9 @@
-from pandas._libs.tslibs.util cimport *
-
cimport numpy as cnp
from numpy cimport ndarray
+from pandas._libs.tslibs.util cimport *
+
+
cdef extern from "numpy/ndarraytypes.h":
void PyArray_CLEARFLAGS(ndarray arr, int flags) nogil
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index 67cbbac47bc68..e235d503cfd14 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -4441,14 +4441,7 @@ def test_week_add_invalid():
@pytest.mark.parametrize(
- "attribute",
- [
- "hours",
- "days",
- "weeks",
- "months",
- "years",
- ],
+ "attribute", ["hours", "days", "weeks", "months", "years"],
)
def test_dateoffset_immutable(attribute):
offset = DateOffset(**{attribute: 0})
| isort-5.6.1 | https://api.github.com/repos/pandas-dev/pandas/pulls/37004 | 2020-10-09T12:03:56Z | 2020-10-09T14:36:25Z | 2020-10-09T14:36:25Z | 2020-10-09T14:36:31Z |
Backport PR #36753 on branch 1.1.x: BUG: Segfault with string Index when using Rolling after Groupby | diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index f7bcd1e795fd3..617c43e0a59ed 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -409,7 +409,7 @@ def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries:
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
- final.append(Series(self._on, index=obj.index, name=name))
+ final.append(Series(self._on, index=self.obj.index, name=name))
if self._selection is not None:
@@ -2259,7 +2259,7 @@ def _get_window_indexer(self, window: int) -> GroupbyRollingIndexer:
"""
rolling_indexer: Type[BaseIndexer]
indexer_kwargs: Optional[Dict] = None
- index_array = self.obj.index.asi8
+ index_array = self._on.asi8
if isinstance(self.window, BaseIndexer):
rolling_indexer = type(self.window)
indexer_kwargs = self.window.__dict__
diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py
index f24317b9bbaa8..074cdb307f7ee 100644
--- a/pandas/tests/window/test_grouper.py
+++ b/pandas/tests/window/test_grouper.py
@@ -418,6 +418,35 @@ def test_groupby_rolling_empty_frame(self):
expected.index = pd.MultiIndex.from_tuples([], names=["s1", "s2", None])
tm.assert_frame_equal(result, expected)
+ def test_groupby_rolling_string_index(self):
+ # GH: 36727
+ df = pd.DataFrame(
+ [
+ ["A", "group_1", pd.Timestamp(2019, 1, 1, 9)],
+ ["B", "group_1", pd.Timestamp(2019, 1, 2, 9)],
+ ["Z", "group_2", pd.Timestamp(2019, 1, 3, 9)],
+ ["H", "group_1", pd.Timestamp(2019, 1, 6, 9)],
+ ["E", "group_2", pd.Timestamp(2019, 1, 20, 9)],
+ ],
+ columns=["index", "group", "eventTime"],
+ ).set_index("index")
+
+ groups = df.groupby("group")
+ df["count_to_date"] = groups.cumcount()
+ rolling_groups = groups.rolling("10d", on="eventTime")
+ result = rolling_groups.apply(lambda df: df.shape[0])
+ expected = pd.DataFrame(
+ [
+ ["A", "group_1", pd.Timestamp(2019, 1, 1, 9), 1.0],
+ ["B", "group_1", pd.Timestamp(2019, 1, 2, 9), 2.0],
+ ["H", "group_1", pd.Timestamp(2019, 1, 6, 9), 3.0],
+ ["Z", "group_2", pd.Timestamp(2019, 1, 3, 9), 1.0],
+ ["E", "group_2", pd.Timestamp(2019, 1, 20, 9), 1.0],
+ ],
+ columns=["index", "group", "eventTime", "count_to_date"],
+ ).set_index(["group", "index"])
+ tm.assert_frame_equal(result, expected)
+
def test_groupby_rolling_no_sort(self):
# GH 36889
result = (
| Backport PR #36753 on branch 1.1.x | https://api.github.com/repos/pandas-dev/pandas/pulls/37003 | 2020-10-09T11:54:20Z | 2020-10-09T13:12:10Z | 2020-10-09T13:12:10Z | 2020-10-09T13:12:16Z |
Backport PR #36911: BUG: RollingGroupby not respecting sort=False | diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst
index d0d03021629c6..f9127ee8d13e7 100644
--- a/doc/source/whatsnew/v1.1.4.rst
+++ b/doc/source/whatsnew/v1.1.4.rst
@@ -15,6 +15,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
- Fixed regression where attempting to mutate a :class:`DateOffset` object would no longer raise an ``AttributeError`` (:issue:`36940`)
+- Fixed regression in :class:`RollingGroupby` with ``sort=False`` not being respected (:issue:`36889`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 272afe7335c6a..c552b587e036e 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -556,8 +556,13 @@ def indices(self):
if isinstance(self.grouper, ops.BaseGrouper):
return self.grouper.indices
- values = Categorical(self.grouper)
- return values._reverse_indexer()
+ # Return a dictionary of {group label: [indices belonging to the group label]}
+ # respecting whether sort was specified
+ codes, uniques = algorithms.factorize(self.grouper, sort=self.sort)
+ return {
+ category: np.flatnonzero(codes == i)
+ for i, category in enumerate(Index(uniques))
+ }
@property
def codes(self) -> np.ndarray:
diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py
index 806c22c60b48f..f24317b9bbaa8 100644
--- a/pandas/tests/window/test_grouper.py
+++ b/pandas/tests/window/test_grouper.py
@@ -417,3 +417,18 @@ def test_groupby_rolling_empty_frame(self):
result = expected.groupby(["s1", "s2"]).rolling(window=1).sum()
expected.index = pd.MultiIndex.from_tuples([], names=["s1", "s2", None])
tm.assert_frame_equal(result, expected)
+
+ def test_groupby_rolling_no_sort(self):
+ # GH 36889
+ result = (
+ pd.DataFrame({"foo": [2, 1], "bar": [2, 1]})
+ .groupby("foo", sort=False)
+ .rolling(1)
+ .min()
+ )
+ expected = pd.DataFrame(
+ np.array([[2.0, 2.0], [1.0, 1.0]]),
+ columns=["foo", "bar"],
+ index=pd.MultiIndex.from_tuples([(2, 0), (1, 1)], names=["foo", None]),
+ )
+ tm.assert_frame_equal(result, expected)
| Backport PR #36911 | https://api.github.com/repos/pandas-dev/pandas/pulls/37002 | 2020-10-09T11:29:48Z | 2020-10-09T12:19:46Z | 2020-10-09T12:19:46Z | 2020-10-09T12:19:52Z |
CLN: Move _aggregate and _aggregate_multiple_funcs to core.aggregation | diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py
index ad69e9f31e065..74359c8831745 100644
--- a/pandas/core/aggregation.py
+++ b/pandas/core/aggregation.py
@@ -29,10 +29,11 @@
Label,
)
+from pandas.core.dtypes.cast import is_nested_object
from pandas.core.dtypes.common import is_dict_like, is_list_like
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
-from pandas.core.base import SpecificationError
+from pandas.core.base import DataError, SpecificationError
import pandas.core.common as com
from pandas.core.indexes.api import Index
@@ -525,3 +526,297 @@ def transform_str_or_callable(
return obj.apply(func, args=args, **kwargs)
except Exception:
return func(obj, *args, **kwargs)
+
+
+def aggregate(obj, arg: AggFuncType, *args, **kwargs):
+ """
+ provide an implementation for the aggregators
+
+ Parameters
+ ----------
+ arg : string, dict, function
+ *args : args to pass on to the function
+ **kwargs : kwargs to pass on to the function
+
+ Returns
+ -------
+ tuple of result, how
+
+ Notes
+ -----
+ how can be a string describe the required post-processing, or
+ None if not required
+ """
+ is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
+
+ _axis = kwargs.pop("_axis", None)
+ if _axis is None:
+ _axis = getattr(obj, "axis", 0)
+
+ if isinstance(arg, str):
+ return obj._try_aggregate_string_function(arg, *args, **kwargs), None
+
+ if isinstance(arg, dict):
+ # aggregate based on the passed dict
+ if _axis != 0: # pragma: no cover
+ raise ValueError("Can only pass dict with axis=0")
+
+ selected_obj = obj._selected_obj
+
+ # if we have a dict of any non-scalars
+ # eg. {'A' : ['mean']}, normalize all to
+ # be list-likes
+ if any(is_aggregator(x) for x in arg.values()):
+ new_arg: Dict[Label, Union[AggFuncTypeBase, List[AggFuncTypeBase]]] = {}
+ for k, v in arg.items():
+ if not isinstance(v, (tuple, list, dict)):
+ new_arg[k] = [v]
+ else:
+ new_arg[k] = v
+
+ # the keys must be in the columns
+ # for ndim=2, or renamers for ndim=1
+
+ # ok for now, but deprecated
+ # {'A': { 'ra': 'mean' }}
+ # {'A': { 'ra': ['mean'] }}
+ # {'ra': ['mean']}
+
+ # not ok
+ # {'ra' : { 'A' : 'mean' }}
+ if isinstance(v, dict):
+ raise SpecificationError("nested renamer is not supported")
+ elif isinstance(selected_obj, ABCSeries):
+ raise SpecificationError("nested renamer is not supported")
+ elif (
+ isinstance(selected_obj, ABCDataFrame)
+ and k not in selected_obj.columns
+ ):
+ raise KeyError(f"Column '{k}' does not exist!")
+
+ arg = new_arg
+
+ else:
+ # deprecation of renaming keys
+ # GH 15931
+ keys = list(arg.keys())
+ if isinstance(selected_obj, ABCDataFrame) and len(
+ selected_obj.columns.intersection(keys)
+ ) != len(keys):
+ cols = sorted(set(keys) - set(selected_obj.columns.intersection(keys)))
+ raise SpecificationError(f"Column(s) {cols} do not exist")
+
+ from pandas.core.reshape.concat import concat
+
+ def _agg_1dim(name, how, subset=None):
+ """
+ aggregate a 1-dim with how
+ """
+ colg = obj._gotitem(name, ndim=1, subset=subset)
+ if colg.ndim != 1:
+ raise SpecificationError(
+ "nested dictionary is ambiguous in aggregation"
+ )
+ return colg.aggregate(how)
+
+ def _agg_2dim(how):
+ """
+ aggregate a 2-dim with how
+ """
+ colg = obj._gotitem(obj._selection, ndim=2, subset=selected_obj)
+ return colg.aggregate(how)
+
+ def _agg(arg, func):
+ """
+ run the aggregations over the arg with func
+ return a dict
+ """
+ result = {}
+ for fname, agg_how in arg.items():
+ result[fname] = func(fname, agg_how)
+ return result
+
+ # set the final keys
+ keys = list(arg.keys())
+
+ if obj._selection is not None:
+
+ sl = set(obj._selection_list)
+
+ # we are a Series like object,
+ # but may have multiple aggregations
+ if len(sl) == 1:
+
+ result = _agg(
+ arg, lambda fname, agg_how: _agg_1dim(obj._selection, agg_how)
+ )
+
+ # we are selecting the same set as we are aggregating
+ elif not len(sl - set(keys)):
+
+ result = _agg(arg, _agg_1dim)
+
+ # we are a DataFrame, with possibly multiple aggregations
+ else:
+
+ result = _agg(arg, _agg_2dim)
+
+ # no selection
+ else:
+
+ try:
+ result = _agg(arg, _agg_1dim)
+ except SpecificationError:
+
+ # we are aggregating expecting all 1d-returns
+ # but we have 2d
+ result = _agg(arg, _agg_2dim)
+
+ # combine results
+
+ def is_any_series() -> bool:
+ # return a boolean if we have *any* nested series
+ return any(isinstance(r, ABCSeries) for r in result.values())
+
+ def is_any_frame() -> bool:
+ # return a boolean if we have *any* nested series
+ return any(isinstance(r, ABCDataFrame) for r in result.values())
+
+ if isinstance(result, list):
+ return concat(result, keys=keys, axis=1, sort=True), True
+
+ elif is_any_frame():
+ # we have a dict of DataFrames
+ # return a MI DataFrame
+
+ keys_to_use = [k for k in keys if not result[k].empty]
+ # Have to check, if at least one DataFrame is not empty.
+ keys_to_use = keys_to_use if keys_to_use != [] else keys
+ return (
+ concat([result[k] for k in keys_to_use], keys=keys_to_use, axis=1),
+ True,
+ )
+
+ elif isinstance(obj, ABCSeries) and is_any_series():
+
+ # we have a dict of Series
+ # return a MI Series
+ try:
+ result = concat(result)
+ except TypeError as err:
+ # we want to give a nice error here if
+ # we have non-same sized objects, so
+ # we don't automatically broadcast
+
+ raise ValueError(
+ "cannot perform both aggregation "
+ "and transformation operations "
+ "simultaneously"
+ ) from err
+
+ return result, True
+
+ # fall thru
+ from pandas import DataFrame, Series
+
+ try:
+ result = DataFrame(result)
+ except ValueError:
+ # we have a dict of scalars
+
+ # GH 36212 use name only if obj is a series
+ if obj.ndim == 1:
+ obj = cast("Series", obj)
+ name = obj.name
+ else:
+ name = None
+
+ result = Series(result, name=name)
+
+ return result, True
+ elif is_list_like(arg):
+ # we require a list, but not an 'str'
+ return aggregate_multiple_funcs(obj, arg, _axis=_axis), None
+ else:
+ result = None
+
+ if callable(arg):
+ f = obj._get_cython_func(arg)
+ if f and not args and not kwargs:
+ return getattr(obj, f)(), None
+
+ # caller can react
+ return result, True
+
+
+def aggregate_multiple_funcs(obj, arg, _axis):
+ from pandas.core.reshape.concat import concat
+
+ if _axis != 0:
+ raise NotImplementedError("axis other than 0 is not supported")
+
+ if obj._selected_obj.ndim == 1:
+ selected_obj = obj._selected_obj
+ else:
+ selected_obj = obj._obj_with_exclusions
+
+ results = []
+ keys = []
+
+ # degenerate case
+ if selected_obj.ndim == 1:
+ for a in arg:
+ colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj)
+ try:
+ new_res = colg.aggregate(a)
+
+ except TypeError:
+ pass
+ else:
+ results.append(new_res)
+
+ # make sure we find a good name
+ name = com.get_callable_name(a) or a
+ keys.append(name)
+
+ # multiples
+ else:
+ for index, col in enumerate(selected_obj):
+ colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index])
+ try:
+ new_res = colg.aggregate(arg)
+ except (TypeError, DataError):
+ pass
+ except ValueError as err:
+ # cannot aggregate
+ if "Must produce aggregated value" in str(err):
+ # raised directly in _aggregate_named
+ pass
+ elif "no results" in str(err):
+ # raised directly in _aggregate_multiple_funcs
+ pass
+ else:
+ raise
+ else:
+ results.append(new_res)
+ keys.append(col)
+
+ # if we are empty
+ if not len(results):
+ raise ValueError("no results")
+
+ try:
+ return concat(results, keys=keys, axis=1, sort=False)
+ except TypeError as err:
+
+ # we are concatting non-NDFrame objects,
+ # e.g. a list of scalars
+
+ from pandas import Series
+
+ result = Series(results, index=keys, name=obj.name)
+ if is_nested_object(result):
+ raise ValueError(
+ "cannot combine transform and aggregation operations"
+ ) from err
+ return result
diff --git a/pandas/core/base.py b/pandas/core/base.py
index 1063e742e38c8..10b83116dee58 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -4,30 +4,28 @@
import builtins
import textwrap
-from typing import Any, Callable, Dict, FrozenSet, List, Optional, TypeVar, Union, cast
+from typing import Any, Callable, Dict, FrozenSet, Optional, TypeVar, Union
import numpy as np
import pandas._libs.lib as lib
-from pandas._typing import AggFuncType, AggFuncTypeBase, IndexLabel, Label
+from pandas._typing import IndexLabel
from pandas.compat import PYPY
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly, doc
-from pandas.core.dtypes.cast import is_nested_object
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_dict_like,
is_extension_array_dtype,
- is_list_like,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna
-from pandas.core import algorithms, common as com
+from pandas.core import algorithms
from pandas.core.accessor import DirNamesMixin
from pandas.core.algorithms import duplicated, unique1d, value_counts
from pandas.core.arraylike import OpsMixin
@@ -282,300 +280,6 @@ def _try_aggregate_string_function(self, arg: str, *args, **kwargs):
f"'{arg}' is not a valid function for '{type(self).__name__}' object"
)
- def _aggregate(self, arg: AggFuncType, *args, **kwargs):
- """
- provide an implementation for the aggregators
-
- Parameters
- ----------
- arg : string, dict, function
- *args : args to pass on to the function
- **kwargs : kwargs to pass on to the function
-
- Returns
- -------
- tuple of result, how
-
- Notes
- -----
- how can be a string describe the required post-processing, or
- None if not required
- """
- is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
-
- _axis = kwargs.pop("_axis", None)
- if _axis is None:
- _axis = getattr(self, "axis", 0)
-
- if isinstance(arg, str):
- return self._try_aggregate_string_function(arg, *args, **kwargs), None
-
- if isinstance(arg, dict):
- # aggregate based on the passed dict
- if _axis != 0: # pragma: no cover
- raise ValueError("Can only pass dict with axis=0")
-
- selected_obj = self._selected_obj
-
- # if we have a dict of any non-scalars
- # eg. {'A' : ['mean']}, normalize all to
- # be list-likes
- if any(is_aggregator(x) for x in arg.values()):
- new_arg: Dict[Label, Union[AggFuncTypeBase, List[AggFuncTypeBase]]] = {}
- for k, v in arg.items():
- if not isinstance(v, (tuple, list, dict)):
- new_arg[k] = [v]
- else:
- new_arg[k] = v
-
- # the keys must be in the columns
- # for ndim=2, or renamers for ndim=1
-
- # ok for now, but deprecated
- # {'A': { 'ra': 'mean' }}
- # {'A': { 'ra': ['mean'] }}
- # {'ra': ['mean']}
-
- # not ok
- # {'ra' : { 'A' : 'mean' }}
- if isinstance(v, dict):
- raise SpecificationError("nested renamer is not supported")
- elif isinstance(selected_obj, ABCSeries):
- raise SpecificationError("nested renamer is not supported")
- elif (
- isinstance(selected_obj, ABCDataFrame)
- and k not in selected_obj.columns
- ):
- raise KeyError(f"Column '{k}' does not exist!")
-
- arg = new_arg
-
- else:
- # deprecation of renaming keys
- # GH 15931
- keys = list(arg.keys())
- if isinstance(selected_obj, ABCDataFrame) and len(
- selected_obj.columns.intersection(keys)
- ) != len(keys):
- cols = sorted(
- set(keys) - set(selected_obj.columns.intersection(keys))
- )
- raise SpecificationError(f"Column(s) {cols} do not exist")
-
- from pandas.core.reshape.concat import concat
-
- def _agg_1dim(name, how, subset=None):
- """
- aggregate a 1-dim with how
- """
- colg = self._gotitem(name, ndim=1, subset=subset)
- if colg.ndim != 1:
- raise SpecificationError(
- "nested dictionary is ambiguous in aggregation"
- )
- return colg.aggregate(how)
-
- def _agg_2dim(how):
- """
- aggregate a 2-dim with how
- """
- colg = self._gotitem(self._selection, ndim=2, subset=selected_obj)
- return colg.aggregate(how)
-
- def _agg(arg, func):
- """
- run the aggregations over the arg with func
- return a dict
- """
- result = {}
- for fname, agg_how in arg.items():
- result[fname] = func(fname, agg_how)
- return result
-
- # set the final keys
- keys = list(arg.keys())
-
- if self._selection is not None:
-
- sl = set(self._selection_list)
-
- # we are a Series like object,
- # but may have multiple aggregations
- if len(sl) == 1:
-
- result = _agg(
- arg, lambda fname, agg_how: _agg_1dim(self._selection, agg_how)
- )
-
- # we are selecting the same set as we are aggregating
- elif not len(sl - set(keys)):
-
- result = _agg(arg, _agg_1dim)
-
- # we are a DataFrame, with possibly multiple aggregations
- else:
-
- result = _agg(arg, _agg_2dim)
-
- # no selection
- else:
-
- try:
- result = _agg(arg, _agg_1dim)
- except SpecificationError:
-
- # we are aggregating expecting all 1d-returns
- # but we have 2d
- result = _agg(arg, _agg_2dim)
-
- # combine results
-
- def is_any_series() -> bool:
- # return a boolean if we have *any* nested series
- return any(isinstance(r, ABCSeries) for r in result.values())
-
- def is_any_frame() -> bool:
- # return a boolean if we have *any* nested series
- return any(isinstance(r, ABCDataFrame) for r in result.values())
-
- if isinstance(result, list):
- return concat(result, keys=keys, axis=1, sort=True), True
-
- elif is_any_frame():
- # we have a dict of DataFrames
- # return a MI DataFrame
-
- keys_to_use = [k for k in keys if not result[k].empty]
- # Have to check, if at least one DataFrame is not empty.
- keys_to_use = keys_to_use if keys_to_use != [] else keys
- return (
- concat([result[k] for k in keys_to_use], keys=keys_to_use, axis=1),
- True,
- )
-
- elif isinstance(self, ABCSeries) and is_any_series():
-
- # we have a dict of Series
- # return a MI Series
- try:
- result = concat(result)
- except TypeError as err:
- # we want to give a nice error here if
- # we have non-same sized objects, so
- # we don't automatically broadcast
-
- raise ValueError(
- "cannot perform both aggregation "
- "and transformation operations "
- "simultaneously"
- ) from err
-
- return result, True
-
- # fall thru
- from pandas import DataFrame, Series
-
- try:
- result = DataFrame(result)
- except ValueError:
- # we have a dict of scalars
-
- # GH 36212 use name only if self is a series
- if self.ndim == 1:
- self = cast("Series", self)
- name = self.name
- else:
- name = None
-
- result = Series(result, name=name)
-
- return result, True
- elif is_list_like(arg):
- # we require a list, but not an 'str'
- return self._aggregate_multiple_funcs(arg, _axis=_axis), None
- else:
- result = None
-
- if callable(arg):
- f = self._get_cython_func(arg)
- if f and not args and not kwargs:
- return getattr(self, f)(), None
-
- # caller can react
- return result, True
-
- def _aggregate_multiple_funcs(self, arg, _axis):
- from pandas.core.reshape.concat import concat
-
- if _axis != 0:
- raise NotImplementedError("axis other than 0 is not supported")
-
- if self._selected_obj.ndim == 1:
- selected_obj = self._selected_obj
- else:
- selected_obj = self._obj_with_exclusions
-
- results = []
- keys = []
-
- # degenerate case
- if selected_obj.ndim == 1:
- for a in arg:
- colg = self._gotitem(selected_obj.name, ndim=1, subset=selected_obj)
- try:
- new_res = colg.aggregate(a)
-
- except TypeError:
- pass
- else:
- results.append(new_res)
-
- # make sure we find a good name
- name = com.get_callable_name(a) or a
- keys.append(name)
-
- # multiples
- else:
- for index, col in enumerate(selected_obj):
- colg = self._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index])
- try:
- new_res = colg.aggregate(arg)
- except (TypeError, DataError):
- pass
- except ValueError as err:
- # cannot aggregate
- if "Must produce aggregated value" in str(err):
- # raised directly in _aggregate_named
- pass
- elif "no results" in str(err):
- # raised directly in _aggregate_multiple_funcs
- pass
- else:
- raise
- else:
- results.append(new_res)
- keys.append(col)
-
- # if we are empty
- if not len(results):
- raise ValueError("no results")
-
- try:
- return concat(results, keys=keys, axis=1, sort=False)
- except TypeError as err:
-
- # we are concatting non-NDFrame objects,
- # e.g. a list of scalars
-
- from pandas import Series
-
- result = Series(results, index=keys, name=self.name)
- if is_nested_object(result):
- raise ValueError(
- "cannot combine transform and aggregation operations"
- ) from err
- return result
-
def _get_cython_func(self, arg: Callable) -> Optional[str]:
"""
if we define an internal function for this argument, return it
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 80e9ec5076610..1e3ae3eb41b8d 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -118,7 +118,12 @@
from pandas.core import algorithms, common as com, nanops, ops
from pandas.core.accessor import CachedAccessor
-from pandas.core.aggregation import reconstruct_func, relabel_result, transform
+from pandas.core.aggregation import (
+ aggregate,
+ reconstruct_func,
+ relabel_result,
+ transform,
+)
from pandas.core.arrays import Categorical, ExtensionArray
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin as DatetimeLikeArray
from pandas.core.arrays.sparse import SparseFrameAccessor
@@ -7434,10 +7439,10 @@ def _aggregate(self, arg, axis=0, *args, **kwargs):
if axis == 1:
# NDFrame.aggregate returns a tuple, and we need to transpose
# only result
- result, how = self.T._aggregate(arg, *args, **kwargs)
+ result, how = aggregate(self.T, arg, *args, **kwargs)
result = result.T if result is not None else result
return result, how
- return super()._aggregate(arg, *args, **kwargs)
+ return aggregate(self, arg, *args, **kwargs)
agg = aggregate
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index e7e812737d48e..af3aa5d121391 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -54,6 +54,8 @@
from pandas.core.dtypes.missing import isna, notna
from pandas.core.aggregation import (
+ aggregate,
+ aggregate_multiple_funcs,
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
@@ -946,7 +948,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
func = maybe_mangle_lambdas(func)
- result, how = self._aggregate(func, *args, **kwargs)
+ result, how = aggregate(self, func, *args, **kwargs)
if how is None:
return result
@@ -966,7 +968,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)
# try to treat as if we are passing a list
try:
- result = self._aggregate_multiple_funcs([func], _axis=self.axis)
+ result = aggregate_multiple_funcs(self, [func], _axis=self.axis)
# select everything except for the last level, which is the one
# containing the name of the function(s), see GH 32040
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index f881f79cb5c1d..3f1b1dac080a7 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -21,6 +21,7 @@
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
+from pandas.core.aggregation import aggregate
import pandas.core.algorithms as algos
from pandas.core.base import DataError
from pandas.core.generic import NDFrame, _shared_docs
@@ -288,7 +289,7 @@ def pipe(self, func, *args, **kwargs):
def aggregate(self, func, *args, **kwargs):
self._set_binner()
- result, how = self._aggregate(func, *args, **kwargs)
+ result, how = aggregate(self, func, *args, **kwargs)
if result is None:
how = func
grouper = None
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 9bd41ca0e76db..a2a6023bf4626 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -70,7 +70,7 @@
import pandas as pd
from pandas.core import algorithms, base, generic, nanops, ops
from pandas.core.accessor import CachedAccessor
-from pandas.core.aggregation import transform
+from pandas.core.aggregation import aggregate, transform
from pandas.core.arrays import ExtensionArray
from pandas.core.arrays.categorical import CategoricalAccessor
from pandas.core.arrays.sparse import SparseAccessor
@@ -4019,7 +4019,7 @@ def aggregate(self, func=None, axis=0, *args, **kwargs):
if func is None:
func = dict(kwargs.items())
- result, how = self._aggregate(func, *args, **kwargs)
+ result, how = aggregate(self, func, *args, **kwargs)
if result is None:
# we can be called from an inner function which
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index af3bba4edf343..466b320f1771f 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -47,6 +47,7 @@
)
from pandas.core.dtypes.missing import notna
+from pandas.core.aggregation import aggregate
from pandas.core.base import DataError, SelectionMixin
import pandas.core.common as com
from pandas.core.construction import extract_array
@@ -618,7 +619,7 @@ def calc(x):
return self._apply_blockwise(homogeneous_func, name)
def aggregate(self, func, *args, **kwargs):
- result, how = self._aggregate(func, *args, **kwargs)
+ result, how = aggregate(self, func, *args, **kwargs)
if result is None:
return self.apply(func, raw=False, args=args, kwargs=kwargs)
return result
@@ -1183,7 +1184,7 @@ def _get_window(
axis="",
)
def aggregate(self, func, *args, **kwargs):
- result, how = self._aggregate(func, *args, **kwargs)
+ result, how = aggregate(self, func, *args, **kwargs)
if result is None:
# these must apply directly
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Renamed functions by removing underscores and renamed `self` argument to `obj`. | https://api.github.com/repos/pandas-dev/pandas/pulls/36999 | 2020-10-09T02:52:36Z | 2020-10-09T14:39:39Z | 2020-10-09T14:39:39Z | 2020-10-11T13:22:02Z |
BUG: DataFrame.diff with dt64 and NaTs | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 0ab95dd260a9c..32e65fbe8d54c 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -360,6 +360,7 @@ Numeric
- Bug in :meth:`DataFrame.__rmatmul__` error handling reporting transposed shapes (:issue:`21581`)
- Bug in :class:`Series` flex arithmetic methods where the result when operating with a ``list``, ``tuple`` or ``np.ndarray`` would have an incorrect name (:issue:`36760`)
- Bug in :class:`IntegerArray` multiplication with ``timedelta`` and ``np.timedelta64`` objects (:issue:`36870`)
+- Bug in :meth:`DataFrame.diff` with ``datetime64`` dtypes including ``NaT`` values failing to fill ``NaT`` results correctly (:issue:`32441`)
Conversion
^^^^^^^^^^
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index be105f0035447..f4b9714dd8ba1 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -2079,7 +2079,7 @@ def _can_hold_element(self, element: Any) -> bool:
return is_integer(element) or (is_float(element) and element.is_integer())
-class DatetimeLikeBlockMixin:
+class DatetimeLikeBlockMixin(Block):
"""Mixin class for DatetimeBlock, DatetimeTZBlock, and TimedeltaBlock."""
@property
@@ -2111,6 +2111,32 @@ def iget(self, key):
# TODO(EA2D): this can be removed if we ever have 2D EA
return self.array_values().reshape(self.shape)[key]
+ def diff(self, n: int, axis: int = 0) -> List["Block"]:
+ """
+ 1st discrete difference.
+
+ Parameters
+ ----------
+ n : int
+ Number of periods to diff.
+ axis : int, default 0
+ Axis to diff upon.
+
+ Returns
+ -------
+ A list with a new TimeDeltaBlock.
+
+ Notes
+ -----
+ The arguments here are mimicking shift so they are called correctly
+ by apply.
+ """
+ # TODO(EA2D): reshape not necessary with 2D EAs
+ values = self.array_values().reshape(self.shape)
+
+ new_values = values - values.shift(n, axis=axis)
+ return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)]
+
def shift(self, periods, axis=0, fill_value=None):
# TODO(EA2D) this is unnecessary if these blocks are backed by 2D EAs
values = self.array_values()
@@ -2118,7 +2144,7 @@ def shift(self, periods, axis=0, fill_value=None):
return self.make_block_same_class(new_values)
-class DatetimeBlock(DatetimeLikeBlockMixin, Block):
+class DatetimeBlock(DatetimeLikeBlockMixin):
__slots__ = ()
is_datetime = True
@@ -2220,6 +2246,7 @@ class DatetimeTZBlock(ExtensionBlock, DatetimeBlock):
internal_values = Block.internal_values
_can_hold_element = DatetimeBlock._can_hold_element
to_native_types = DatetimeBlock.to_native_types
+ diff = DatetimeBlock.diff
fill_value = np.datetime64("NaT", "ns")
array_values = ExtensionBlock.array_values
@@ -2291,43 +2318,6 @@ def external_values(self):
# return an object-dtype ndarray of Timestamps.
return np.asarray(self.values.astype("datetime64[ns]", copy=False))
- def diff(self, n: int, axis: int = 0) -> List["Block"]:
- """
- 1st discrete difference.
-
- Parameters
- ----------
- n : int
- Number of periods to diff.
- axis : int, default 0
- Axis to diff upon.
-
- Returns
- -------
- A list with a new TimeDeltaBlock.
-
- Notes
- -----
- The arguments here are mimicking shift so they are called correctly
- by apply.
- """
- if axis == 0:
- # TODO(EA2D): special case not needed with 2D EAs
- # Cannot currently calculate diff across multiple blocks since this
- # function is invoked via apply
- raise NotImplementedError
-
- if n == 0:
- # Fastpath avoids making a copy in `shift`
- new_values = np.zeros(self.values.shape, dtype=np.int64)
- else:
- new_values = (self.values - self.shift(n, axis=axis)[0].values).asi8
-
- # Reshape the new_values like how algos.diff does for timedelta data
- new_values = new_values.reshape(1, len(new_values))
- new_values = new_values.astype("timedelta64[ns]")
- return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)]
-
def fillna(self, value, limit=None, inplace=False, downcast=None):
# We support filling a DatetimeTZ with a `value` whose timezone
# is different by coercing to object.
diff --git a/pandas/tests/frame/methods/test_diff.py b/pandas/tests/frame/methods/test_diff.py
index 42586c14092f2..e160d5d24d40a 100644
--- a/pandas/tests/frame/methods/test_diff.py
+++ b/pandas/tests/frame/methods/test_diff.py
@@ -39,6 +39,60 @@ def test_diff(self, datetime_frame):
expected = pd.DataFrame({"x": np.nan, "y": pd.Series(1), "z": pd.Series(1)})
tm.assert_frame_equal(result, expected)
+ def test_diff_timedelta64_with_nat(self):
+ # GH#32441
+ arr = np.arange(6).reshape(3, 2).astype("timedelta64[ns]")
+ arr[:, 0] = np.timedelta64("NaT", "ns")
+
+ df = pd.DataFrame(arr)
+ result = df.diff(1, axis=0)
+
+ expected = pd.DataFrame(
+ {0: df[0], 1: [pd.NaT, pd.Timedelta(2), pd.Timedelta(2)]}
+ )
+ tm.assert_equal(result, expected)
+
+ result = df.diff(0)
+ expected = df - df
+ assert expected[0].isna().all()
+ tm.assert_equal(result, expected)
+
+ result = df.diff(-1, axis=1)
+ expected = df * np.nan
+ tm.assert_equal(result, expected)
+
+ @pytest.mark.parametrize("tz", [None, "UTC"])
+ def test_diff_datetime_axis0_with_nat(self, tz):
+ # GH#32441
+ dti = pd.DatetimeIndex(["NaT", "2019-01-01", "2019-01-02"], tz=tz)
+ ser = pd.Series(dti)
+
+ df = ser.to_frame()
+
+ result = df.diff()
+ ex_index = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta(days=1)])
+ expected = pd.Series(ex_index).to_frame()
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("tz", [None, "UTC"])
+ def test_diff_datetime_with_nat_zero_periods(self, tz):
+ # diff on NaT values should give NaT, not timedelta64(0)
+ dti = pd.date_range("2016-01-01", periods=4, tz=tz)
+ ser = pd.Series(dti)
+ df = ser.to_frame()
+
+ df[1] = ser.copy()
+ df.iloc[:, 0] = pd.NaT
+
+ expected = df - df
+ assert expected[0].isna().all()
+
+ result = df.diff(0, axis=0)
+ tm.assert_frame_equal(result, expected)
+
+ result = df.diff(0, axis=1)
+ tm.assert_frame_equal(result, expected)
+
@pytest.mark.parametrize("tz", [None, "UTC"])
def test_diff_datetime_axis0(self, tz):
# GH#18578
| - [x] closes #32441
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
There's a related bug in algos.diff, but the elegant solution goes through the cython code, so I'm doing that in a separate branch. | https://api.github.com/repos/pandas-dev/pandas/pulls/36998 | 2020-10-09T02:20:07Z | 2020-10-10T17:40:57Z | 2020-10-10T17:40:56Z | 2020-10-10T18:09:04Z |
REF/BUG/TYP: read_csv shouldn't close user-provided file handles | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 16e6c12488b83..83a94bcbd9c79 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -493,6 +493,7 @@ I/O
- Bug in output rendering of complex numbers showing too many trailing zeros (:issue:`36799`)
- Bug in :class:`HDFStore` threw a ``TypeError`` when exporting an empty :class:`DataFrame` with ``datetime64[ns, tz]`` dtypes with a fixed HDF5 store (:issue:`20594`)
- Bug in :class:`HDFStore` was dropping timezone information when exporting :class:`Series` with ``datetime64[ns, tz]`` dtypes with a fixed HDF5 store (:issue:`20594`)
+- :func:`read_csv` was closing user-provided binary file handles when ``engine="c"`` and an ``encoding`` was requested (:issue:`36980`)
- Bug in :meth:`DataFrame.to_hdf` was not dropping missing rows with ``dropna=True`` (:issue:`35719`)
Plotting
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index b87e46f9b6648..4b7a47c5f93c2 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -1,15 +1,10 @@
# Copyright (c) 2012, Lambda Foundry, Inc.
# See LICENSE for the license
-import bz2
from csv import QUOTE_MINIMAL, QUOTE_NONE, QUOTE_NONNUMERIC
from errno import ENOENT
-import gzip
-import io
-import os
import sys
import time
import warnings
-import zipfile
from libc.stdlib cimport free
from libc.string cimport strcasecmp, strlen, strncpy
@@ -17,7 +12,7 @@ from libc.string cimport strcasecmp, strlen, strncpy
import cython
from cython import Py_ssize_t
-from cpython.bytes cimport PyBytes_AsString, PyBytes_FromString
+from cpython.bytes cimport PyBytes_AsString
from cpython.exc cimport PyErr_Fetch, PyErr_Occurred
from cpython.object cimport PyObject
from cpython.ref cimport Py_XDECREF
@@ -67,7 +62,6 @@ from pandas._libs.khash cimport (
khiter_t,
)
-from pandas.compat import get_lzma_file, import_lzma
from pandas.errors import DtypeWarning, EmptyDataError, ParserError, ParserWarning
from pandas.core.dtypes.common import (
@@ -82,11 +76,10 @@ from pandas.core.dtypes.common import (
)
from pandas.core.dtypes.concat import union_categoricals
-lzma = import_lzma()
-
cdef:
float64_t INF = <float64_t>np.inf
float64_t NEGINF = -INF
+ int64_t DEFAULT_CHUNKSIZE = 256 * 1024
cdef extern from "headers/portable.h":
@@ -275,14 +268,15 @@ cdef extern from "parser/io.h":
size_t *bytes_read, int *status)
-DEFAULT_CHUNKSIZE = 256 * 1024
-
-
cdef class TextReader:
"""
# source: StringIO or file object
+ .. versionchanged:: 1.2.0
+ removed 'compression', 'memory_map', and 'encoding' arguments.
+ These arguments are outsourced to CParserWrapper.
+ 'source' has to be a file handle.
"""
cdef:
@@ -299,7 +293,7 @@ cdef class TextReader:
cdef public:
int64_t leading_cols, table_width, skipfooter, buffer_lines
- bint allow_leading_cols, mangle_dupe_cols, memory_map, low_memory
+ bint allow_leading_cols, mangle_dupe_cols, low_memory
bint delim_whitespace
object delimiter, converters
object na_values
@@ -307,8 +301,6 @@ cdef class TextReader:
object index_col
object skiprows
object dtype
- object encoding
- object compression
object usecols
list dtype_cast_order
set unnamed_cols
@@ -321,10 +313,8 @@ cdef class TextReader:
header_end=0,
index_col=None,
names=None,
- bint memory_map=False,
tokenize_chunksize=DEFAULT_CHUNKSIZE,
bint delim_whitespace=False,
- compression=None,
converters=None,
bint skipinitialspace=False,
escapechar=None,
@@ -332,7 +322,6 @@ cdef class TextReader:
quotechar=b'"',
quoting=0,
lineterminator=None,
- encoding=None,
comment=None,
decimal=b'.',
thousands=None,
@@ -356,15 +345,7 @@ cdef class TextReader:
bint skip_blank_lines=True):
# set encoding for native Python and C library
- if encoding is not None:
- if not isinstance(encoding, bytes):
- encoding = encoding.encode('utf-8')
- encoding = encoding.lower()
- self.c_encoding = <char*>encoding
- else:
- self.c_encoding = NULL
-
- self.encoding = encoding
+ self.c_encoding = NULL
self.parser = parser_new()
self.parser.chunksize = tokenize_chunksize
@@ -374,9 +355,6 @@ cdef class TextReader:
# For timekeeping
self.clocks = []
- self.compression = compression
- self.memory_map = memory_map
-
self.parser.usecols = (usecols is not None)
self._setup_parser_source(source)
@@ -562,11 +540,6 @@ cdef class TextReader:
parser_del(self.parser)
def close(self):
- # we need to properly close an open derived
- # filehandle here, e.g. and UTFRecoder
- if self.handle is not None:
- self.handle.close()
-
# also preemptively free all allocated memory
parser_free(self.parser)
if self.true_set:
@@ -614,82 +587,15 @@ cdef class TextReader:
cdef:
void *ptr
- self.parser.cb_io = NULL
- self.parser.cb_cleanup = NULL
-
- if self.compression:
- if self.compression == 'gzip':
- if isinstance(source, str):
- source = gzip.GzipFile(source, 'rb')
- else:
- source = gzip.GzipFile(fileobj=source)
- elif self.compression == 'bz2':
- source = bz2.BZ2File(source, 'rb')
- elif self.compression == 'zip':
- zip_file = zipfile.ZipFile(source)
- zip_names = zip_file.namelist()
-
- if len(zip_names) == 1:
- file_name = zip_names.pop()
- source = zip_file.open(file_name)
-
- elif len(zip_names) == 0:
- raise ValueError(f'Zero files found in compressed '
- f'zip file {source}')
- else:
- raise ValueError(f'Multiple files found in compressed '
- f'zip file {zip_names}')
- elif self.compression == 'xz':
- if isinstance(source, str):
- source = get_lzma_file(lzma)(source, 'rb')
- else:
- source = get_lzma_file(lzma)(filename=source)
- else:
- raise ValueError(f'Unrecognized compression type: '
- f'{self.compression}')
-
- if (self.encoding and hasattr(source, "read") and
- not hasattr(source, "encoding")):
- source = io.TextIOWrapper(
- source, self.encoding.decode('utf-8'), newline='')
-
- self.encoding = b'utf-8'
- self.c_encoding = <char*>self.encoding
-
- self.handle = source
-
- if isinstance(source, str):
- encoding = sys.getfilesystemencoding() or "utf-8"
- usource = source
- source = source.encode(encoding)
-
- if self.memory_map:
- ptr = new_mmap(source)
- if ptr == NULL:
- # fall back
- ptr = new_file_source(source, self.parser.chunksize)
- self.parser.cb_io = &buffer_file_bytes
- self.parser.cb_cleanup = &del_file_source
- else:
- self.parser.cb_io = &buffer_mmap_bytes
- self.parser.cb_cleanup = &del_mmap
- else:
- ptr = new_file_source(source, self.parser.chunksize)
- self.parser.cb_io = &buffer_file_bytes
- self.parser.cb_cleanup = &del_file_source
- self.parser.source = ptr
-
- elif hasattr(source, 'read'):
- # e.g., StringIO
-
- ptr = new_rd_source(source)
- self.parser.source = ptr
- self.parser.cb_io = &buffer_rd_bytes
- self.parser.cb_cleanup = &del_rd_source
- else:
+ if not hasattr(source, "read"):
raise IOError(f'Expected file path name or file-like object, '
f'got {type(source)} type')
+ ptr = new_rd_source(source)
+ self.parser.source = ptr
+ self.parser.cb_io = &buffer_rd_bytes
+ self.parser.cb_cleanup = &del_rd_source
+
cdef _get_header(self):
# header is now a list of lists, so field_count should use header[0]
diff --git a/pandas/_typing.py b/pandas/_typing.py
index 3376559fb23ff..3e89cf24632e2 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -1,6 +1,6 @@
-from dataclasses import dataclass
from datetime import datetime, timedelta, tzinfo
-from io import IOBase
+from io import BufferedIOBase, RawIOBase, TextIOBase, TextIOWrapper
+from mmap import mmap
from pathlib import Path
from typing import (
IO,
@@ -10,7 +10,6 @@
Callable,
Collection,
Dict,
- Generic,
Hashable,
List,
Mapping,
@@ -77,8 +76,6 @@
"ExtensionDtype", str, np.dtype, Type[Union[str, float, int, complex, bool, object]]
]
DtypeObj = Union[np.dtype, "ExtensionDtype"]
-FilePathOrBuffer = Union[str, Path, IO[AnyStr], IOBase]
-FileOrBuffer = Union[str, IO[AnyStr], IOBase]
# FrameOrSeriesUnion means either a DataFrame or a Series. E.g.
# `def func(a: FrameOrSeriesUnion) -> FrameOrSeriesUnion: ...` means that if a Series
@@ -133,6 +130,10 @@
"Resampler",
]
+# filenames and file-like-objects
+Buffer = Union[IO[AnyStr], RawIOBase, BufferedIOBase, TextIOBase, TextIOWrapper, mmap]
+FileOrBuffer = Union[str, Buffer[T]]
+FilePathOrBuffer = Union[Path, FileOrBuffer[T]]
# for arbitrary kwargs passed during reading/writing files
StorageOptions = Optional[Dict[str, Any]]
@@ -150,21 +151,3 @@
# type of float formatter in DataFrameFormatter
FloatFormatType = Union[str, Callable, "EngFormatter"]
-
-
-@dataclass
-class IOargs(Generic[ModeVar, EncodingVar]):
- """
- Return value of io/common.py:get_filepath_or_buffer.
-
- Note (copy&past from io/parsers):
- filepath_or_buffer can be Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile]
- though mypy handling of conditional imports is difficult.
- See https://github.com/python/mypy/issues/1297
- """
-
- filepath_or_buffer: FileOrBuffer
- encoding: EncodingVar
- compression: CompressionDict
- should_close: bool
- mode: Union[ModeVar, str]
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 24b89085ac121..a3130ec27713d 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -15,6 +15,7 @@
import datetime
from io import StringIO
import itertools
+import mmap
from textwrap import dedent
from typing import (
IO,
@@ -2286,10 +2287,9 @@ def to_markdown(
if buf is None:
return result
ioargs = get_filepath_or_buffer(buf, mode=mode, storage_options=storage_options)
- assert not isinstance(ioargs.filepath_or_buffer, str)
+ assert not isinstance(ioargs.filepath_or_buffer, (str, mmap.mmap))
ioargs.filepath_or_buffer.writelines(result)
- if ioargs.should_close:
- ioargs.filepath_or_buffer.close()
+ ioargs.close()
return None
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
diff --git a/pandas/io/common.py b/pandas/io/common.py
index c147ae9fd0aa8..90a79e54015c4 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -2,8 +2,9 @@
import bz2
from collections import abc
+import dataclasses
import gzip
-from io import BufferedIOBase, BytesIO, RawIOBase
+from io import BufferedIOBase, BytesIO, RawIOBase, TextIOWrapper
import mmap
import os
import pathlib
@@ -13,12 +14,14 @@
Any,
AnyStr,
Dict,
+ Generic,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
+ cast,
)
from urllib.parse import (
urljoin,
@@ -31,12 +34,12 @@
import zipfile
from pandas._typing import (
+ Buffer,
CompressionDict,
CompressionOptions,
EncodingVar,
FileOrBuffer,
FilePathOrBuffer,
- IOargs,
ModeVar,
StorageOptions,
)
@@ -56,6 +59,76 @@
from io import IOBase
+@dataclasses.dataclass
+class IOArgs(Generic[ModeVar, EncodingVar]):
+ """
+ Return value of io/common.py:get_filepath_or_buffer.
+
+ This is used to easily close created fsspec objects.
+
+ Note (copy&paste from io/parsers):
+ filepath_or_buffer can be Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile]
+ though mypy handling of conditional imports is difficult.
+ See https://github.com/python/mypy/issues/1297
+ """
+
+ filepath_or_buffer: FileOrBuffer
+ encoding: EncodingVar
+ mode: Union[ModeVar, str]
+ compression: CompressionDict
+ should_close: bool = False
+
+ def close(self) -> None:
+ """
+ Close the buffer if it was created by get_filepath_or_buffer.
+ """
+ if self.should_close:
+ assert not isinstance(self.filepath_or_buffer, str)
+ try:
+ self.filepath_or_buffer.close()
+ except (OSError, ValueError):
+ pass
+ self.should_close = False
+
+
+@dataclasses.dataclass
+class IOHandles:
+ """
+ Return value of io/common.py:get_handle
+
+ This is used to easily close created buffers and to handle corner cases when
+ TextIOWrapper is inserted.
+
+ handle: The file handle to be used.
+ created_handles: All file handles that are created by get_handle
+ is_wrapped: Whether a TextIOWrapper needs to be detached.
+ """
+
+ handle: Buffer
+ created_handles: List[Buffer] = dataclasses.field(default_factory=list)
+ is_wrapped: bool = False
+
+ def close(self) -> None:
+ """
+ Close all created buffers.
+
+ Note: If a TextIOWrapper was inserted, it is flushed and detached to
+ avoid closing the potentially user-created buffer.
+ """
+ if self.is_wrapped:
+ assert isinstance(self.handle, TextIOWrapper)
+ self.handle.flush()
+ self.handle.detach()
+ self.created_handles.remove(self.handle)
+ try:
+ for handle in self.created_handles:
+ handle.close()
+ except (OSError, ValueError):
+ pass
+ self.created_handles = []
+ self.is_wrapped = False
+
+
def is_url(url) -> bool:
"""
Check to see if a URL has a valid protocol.
@@ -176,7 +249,7 @@ def get_filepath_or_buffer(
compression: CompressionOptions = None,
mode: ModeVar = None, # type: ignore[assignment]
storage_options: StorageOptions = None,
-) -> IOargs[ModeVar, EncodingVar]:
+) -> IOArgs[ModeVar, EncodingVar]:
"""
If the filepath_or_buffer is a url, translate and return the buffer.
Otherwise passthrough.
@@ -201,7 +274,7 @@ def get_filepath_or_buffer(
..versionchange:: 1.2.0
- Returns the dataclass IOargs.
+ Returns the dataclass IOArgs.
"""
filepath_or_buffer = stringify_path(filepath_or_buffer)
@@ -225,6 +298,10 @@ def get_filepath_or_buffer(
compression = dict(compression, method=compression_method)
+ # uniform encoding names
+ if encoding is not None:
+ encoding = encoding.replace("_", "-").lower()
+
# bz2 and xz do not write the byte order mark for utf-16 and utf-32
# print a warning when writing such files
if (
@@ -258,7 +335,7 @@ def get_filepath_or_buffer(
compression = {"method": "gzip"}
reader = BytesIO(req.read())
req.close()
- return IOargs(
+ return IOArgs(
filepath_or_buffer=reader,
encoding=encoding,
compression=compression,
@@ -310,7 +387,7 @@ def get_filepath_or_buffer(
filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
).open()
- return IOargs(
+ return IOArgs(
filepath_or_buffer=file_obj,
encoding=encoding,
compression=compression,
@@ -323,7 +400,7 @@ def get_filepath_or_buffer(
)
if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)):
- return IOargs(
+ return IOArgs(
filepath_or_buffer=_expand_user(filepath_or_buffer),
encoding=encoding,
compression=compression,
@@ -335,7 +412,7 @@ def get_filepath_or_buffer(
msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}"
raise ValueError(msg)
- return IOargs(
+ return IOArgs(
filepath_or_buffer=filepath_or_buffer,
encoding=encoding,
compression=compression,
@@ -455,14 +532,14 @@ def infer_compression(
def get_handle(
- path_or_buf,
+ path_or_buf: FilePathOrBuffer,
mode: str,
- encoding=None,
+ encoding: Optional[str] = None,
compression: CompressionOptions = None,
memory_map: bool = False,
is_text: bool = True,
- errors=None,
-):
+ errors: Optional[str] = None,
+) -> IOHandles:
"""
Get file handle for given path/buffer and mode.
@@ -506,14 +583,9 @@ def get_handle(
See the errors argument for :func:`open` for a full list
of options.
- .. versionadded:: 1.1.0
+ .. versionchanged:: 1.2.0
- Returns
- -------
- f : file-like
- A file-like object.
- handles : list of file-like objects
- A list of file-like object that were opened in this function.
+ Returns the dataclass IOHandles
"""
need_text_wrapping: Tuple[Type["IOBase"], ...]
try:
@@ -532,12 +604,16 @@ def get_handle(
except ImportError:
pass
- handles: List[Union[IO, _MMapWrapper]] = list()
- f = path_or_buf
+ handles: List[Buffer] = list()
+
+ # Windows does not default to utf-8. Set to utf-8 for a consistent behavior
+ if encoding is None:
+ encoding = "utf-8"
# Convert pathlib.Path/py.path.local or string
path_or_buf = stringify_path(path_or_buf)
is_path = isinstance(path_or_buf, str)
+ f = path_or_buf
compression, compression_args = get_compression_method(compression)
if is_path:
@@ -548,25 +624,29 @@ def get_handle(
# GZ Compression
if compression == "gzip":
if is_path:
+ assert isinstance(path_or_buf, str)
f = gzip.GzipFile(filename=path_or_buf, mode=mode, **compression_args)
else:
- f = gzip.GzipFile(fileobj=path_or_buf, mode=mode, **compression_args)
+ f = gzip.GzipFile(
+ fileobj=path_or_buf, # type: ignore[arg-type]
+ mode=mode,
+ **compression_args,
+ )
# BZ Compression
elif compression == "bz2":
- f = bz2.BZ2File(path_or_buf, mode=mode, **compression_args)
+ f = bz2.BZ2File(
+ path_or_buf, mode=mode, **compression_args # type: ignore[arg-type]
+ )
# ZIP Compression
elif compression == "zip":
- zf = _BytesZipFile(path_or_buf, mode, **compression_args)
- # Ensure the container is closed as well.
- handles.append(zf)
- if zf.mode == "w":
- f = zf
- elif zf.mode == "r":
- zip_names = zf.namelist()
+ f = _BytesZipFile(path_or_buf, mode, **compression_args)
+ if f.mode == "r":
+ handles.append(f)
+ zip_names = f.namelist()
if len(zip_names) == 1:
- f = zf.open(zip_names.pop())
+ f = f.open(zip_names.pop())
elif len(zip_names) == 0:
raise ValueError(f"Zero files found in ZIP file {path_or_buf}")
else:
@@ -584,36 +664,40 @@ def get_handle(
msg = f"Unrecognized compression type: {compression}"
raise ValueError(msg)
+ assert not isinstance(f, str)
handles.append(f)
elif is_path:
# Check whether the filename is to be opened in binary mode.
# Binary mode does not support 'encoding' and 'newline'.
is_binary_mode = "b" in mode
-
+ assert isinstance(path_or_buf, str)
if encoding and not is_binary_mode:
# Encoding
f = open(path_or_buf, mode, encoding=encoding, errors=errors, newline="")
- elif is_text and not is_binary_mode:
- # No explicit encoding
- f = open(path_or_buf, mode, errors="replace", newline="")
else:
# Binary mode
f = open(path_or_buf, mode)
handles.append(f)
# Convert BytesIO or file objects passed with an encoding
- if is_text and (compression or isinstance(f, need_text_wrapping)):
- from io import TextIOWrapper
-
- g = TextIOWrapper(f, encoding=encoding, errors=errors, newline="")
- if not isinstance(f, (BufferedIOBase, RawIOBase)):
- handles.append(g)
- f = g
+ is_wrapped = False
+ if is_text and (
+ compression
+ or isinstance(f, need_text_wrapping)
+ or "b" in getattr(f, "mode", "")
+ ):
+ f = TextIOWrapper(
+ f, encoding=encoding, errors=errors, newline="" # type: ignore[arg-type]
+ )
+ handles.append(f)
+ # do not mark as wrapped when the user provided a string
+ is_wrapped = not is_path
if memory_map and hasattr(f, "fileno"):
+ assert not isinstance(f, str)
try:
- wrapped = _MMapWrapper(f)
+ wrapped = cast(mmap.mmap, _MMapWrapper(f)) # type: ignore[arg-type]
f.close()
handles.remove(f)
handles.append(wrapped)
@@ -625,7 +709,13 @@ def get_handle(
# leave the file handler as is then
pass
- return f, handles
+ handles.reverse() # close the most recently added buffer first
+ assert not isinstance(f, str)
+ return IOHandles(
+ handle=f,
+ created_handles=handles,
+ is_wrapped=is_wrapped,
+ )
# error: Definition of "__exit__" in base class "ZipFile" is incompatible with
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 3461652f4ea24..03c61c3ed8376 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -17,6 +17,7 @@
from pandas.core.frame import DataFrame
from pandas.io.common import (
+ IOArgs,
get_filepath_or_buffer,
is_url,
stringify_path,
@@ -349,24 +350,37 @@ def read_excel(
class BaseExcelReader(metaclass=abc.ABCMeta):
def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None):
+ self.ioargs = IOArgs(
+ filepath_or_buffer=filepath_or_buffer,
+ encoding=None,
+ mode=None,
+ compression={"method": None},
+ )
# If filepath_or_buffer is a url, load the data into a BytesIO
if is_url(filepath_or_buffer):
- filepath_or_buffer = BytesIO(urlopen(filepath_or_buffer).read())
+ self.ioargs = IOArgs(
+ filepath_or_buffer=BytesIO(urlopen(filepath_or_buffer).read()),
+ should_close=True,
+ encoding=None,
+ mode=None,
+ compression={"method": None},
+ )
elif not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)):
- filepath_or_buffer = get_filepath_or_buffer(
+ self.ioargs = get_filepath_or_buffer(
filepath_or_buffer, storage_options=storage_options
- ).filepath_or_buffer
+ )
- if isinstance(filepath_or_buffer, self._workbook_class):
- self.book = filepath_or_buffer
- elif hasattr(filepath_or_buffer, "read"):
+ if isinstance(self.ioargs.filepath_or_buffer, self._workbook_class):
+ self.book = self.ioargs.filepath_or_buffer
+ elif hasattr(self.ioargs.filepath_or_buffer, "read"):
# N.B. xlrd.Book has a read attribute too
- filepath_or_buffer.seek(0)
- self.book = self.load_workbook(filepath_or_buffer)
- elif isinstance(filepath_or_buffer, str):
- self.book = self.load_workbook(filepath_or_buffer)
- elif isinstance(filepath_or_buffer, bytes):
- self.book = self.load_workbook(BytesIO(filepath_or_buffer))
+ assert not isinstance(self.ioargs.filepath_or_buffer, str)
+ self.ioargs.filepath_or_buffer.seek(0)
+ self.book = self.load_workbook(self.ioargs.filepath_or_buffer)
+ elif isinstance(self.ioargs.filepath_or_buffer, str):
+ self.book = self.load_workbook(self.ioargs.filepath_or_buffer)
+ elif isinstance(self.ioargs.filepath_or_buffer, bytes):
+ self.book = self.load_workbook(BytesIO(self.ioargs.filepath_or_buffer))
else:
raise ValueError(
"Must explicitly set engine if not passing in buffer or path for io."
@@ -382,7 +396,7 @@ def load_workbook(self, filepath_or_buffer):
pass
def close(self):
- pass
+ self.ioargs.close()
@property
@abc.abstractmethod
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
index 9a42b8289ab47..198acd5862d45 100644
--- a/pandas/io/feather_format.py
+++ b/pandas/io/feather_format.py
@@ -81,9 +81,7 @@ def to_feather(
feather.write_feather(df, ioargs.filepath_or_buffer, **kwargs)
- if ioargs.should_close:
- assert not isinstance(ioargs.filepath_or_buffer, str)
- ioargs.filepath_or_buffer.close()
+ ioargs.close()
def read_feather(
@@ -137,9 +135,6 @@ def read_feather(
ioargs.filepath_or_buffer, columns=columns, use_threads=bool(use_threads)
)
- # s3fs only validates the credentials when the file is closed.
- if ioargs.should_close:
- assert not isinstance(ioargs.filepath_or_buffer, str)
- ioargs.filepath_or_buffer.close()
+ ioargs.close()
return df
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index 6c62d6825bc84..20226dbb3c9d4 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -3,7 +3,6 @@
"""
import csv as csvlib
-from io import StringIO, TextIOWrapper
import os
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Sequence, Union
@@ -39,7 +38,7 @@ class CSVFormatter:
def __init__(
self,
formatter: "DataFrameFormatter",
- path_or_buf: Optional[FilePathOrBuffer[str]] = None,
+ path_or_buf: FilePathOrBuffer[str] = "",
sep: str = ",",
cols: Optional[Sequence[Label]] = None,
index_label: Optional[IndexLabel] = None,
@@ -60,25 +59,14 @@ def __init__(
self.obj = self.fmt.frame
- self.encoding = encoding or "utf-8"
-
- if path_or_buf is None:
- path_or_buf = StringIO()
-
- ioargs = get_filepath_or_buffer(
+ self.ioargs = get_filepath_or_buffer(
path_or_buf,
- encoding=self.encoding,
+ encoding=encoding,
compression=compression,
mode=mode,
storage_options=storage_options,
)
- self.compression = ioargs.compression.pop("method")
- self.compression_args = ioargs.compression
- self.path_or_buf = ioargs.filepath_or_buffer
- self.should_close = ioargs.should_close
- self.mode = ioargs.mode
-
self.sep = sep
self.index_label = self._initialize_index_label(index_label)
self.errors = errors
@@ -238,20 +226,19 @@ def save(self) -> None:
"""
Create the writer & save.
"""
- # get a handle or wrap an existing handle to take care of 1) compression and
- # 2) text -> byte conversion
- f, handles = get_handle(
- self.path_or_buf,
- self.mode,
- encoding=self.encoding,
+ # apply compression and byte/text conversion
+ handles = get_handle(
+ self.ioargs.filepath_or_buffer,
+ self.ioargs.mode,
+ encoding=self.ioargs.encoding,
errors=self.errors,
- compression=dict(self.compression_args, method=self.compression),
+ compression=self.ioargs.compression,
)
try:
# Note: self.encoding is irrelevant here
self.writer = csvlib.writer(
- f,
+ handles.handle, # type: ignore[arg-type]
lineterminator=self.line_terminator,
delimiter=self.sep,
quoting=self.quoting,
@@ -263,23 +250,10 @@ def save(self) -> None:
self._save()
finally:
- if self.should_close:
- f.close()
- elif (
- isinstance(f, TextIOWrapper)
- and not f.closed
- and f != self.path_or_buf
- and hasattr(self.path_or_buf, "write")
- ):
- # get_handle uses TextIOWrapper for non-binary handles. TextIOWrapper
- # closes the wrapped handle if it is not detached.
- f.flush() # make sure everything is written
- f.detach() # makes f unusable
- del f
- elif f != self.path_or_buf:
- f.close()
- for _fh in handles:
- _fh.close()
+ # close compression and byte/text wrapper
+ handles.close()
+ # close any fsspec-like objects
+ self.ioargs.close()
def _save(self) -> None:
if self._need_to_save_header:
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 3c759f477899b..43e76d0aef490 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1046,8 +1046,12 @@ def to_csv(
"""
from pandas.io.formats.csvs import CSVFormatter
+ created_buffer = path_or_buf is None
+ if created_buffer:
+ path_or_buf = StringIO()
+
csv_formatter = CSVFormatter(
- path_or_buf=path_or_buf,
+ path_or_buf=path_or_buf, # type: ignore[arg-type]
line_terminator=line_terminator,
sep=sep,
encoding=encoding,
@@ -1067,9 +1071,11 @@ def to_csv(
)
csv_formatter.save()
- if path_or_buf is None:
- assert isinstance(csv_formatter.path_or_buf, StringIO)
- return csv_formatter.path_or_buf.getvalue()
+ if created_buffer:
+ assert isinstance(path_or_buf, StringIO)
+ content = path_or_buf.getvalue()
+ path_or_buf.close()
+ return content
return None
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py
index 98b9a585d890e..bfb57f415db3b 100644
--- a/pandas/io/json/_json.py
+++ b/pandas/io/json/_json.py
@@ -1,10 +1,10 @@
from abc import ABC, abstractmethod
from collections import abc
import functools
-from io import BytesIO, StringIO
+from io import StringIO
from itertools import islice
import os
-from typing import IO, Any, Callable, List, Mapping, Optional, Tuple, Type, Union
+from typing import Any, Callable, Mapping, Optional, Tuple, Type, Union
import numpy as np
@@ -26,7 +26,12 @@
from pandas.core.generic import NDFrame
from pandas.core.reshape.concat import concat
-from pandas.io.common import get_compression_method, get_filepath_or_buffer, get_handle
+from pandas.io.common import (
+ IOHandles,
+ get_compression_method,
+ get_filepath_or_buffer,
+ get_handle,
+)
from pandas.io.json._normalize import convert_to_line_delimits
from pandas.io.json._table_schema import build_table_schema, parse_table_schema
from pandas.io.parsers import validate_integer
@@ -59,17 +64,6 @@ def to_json(
"'index=False' is only valid when 'orient' is 'split' or 'table'"
)
- if path_or_buf is not None:
- ioargs = get_filepath_or_buffer(
- path_or_buf,
- compression=compression,
- mode="wt",
- storage_options=storage_options,
- )
- path_or_buf = ioargs.filepath_or_buffer
- should_close = ioargs.should_close
- compression = ioargs.compression
-
if lines and orient != "records":
raise ValueError("'lines' keyword only valid when 'orient' is records")
@@ -101,20 +95,27 @@ def to_json(
if lines:
s = convert_to_line_delimits(s)
- if isinstance(path_or_buf, str):
- fh, handles = get_handle(path_or_buf, "w", compression=compression)
+ if path_or_buf is not None:
+ # open fsspec URLs
+ ioargs = get_filepath_or_buffer(
+ path_or_buf,
+ compression=compression,
+ mode="wt",
+ storage_options=storage_options,
+ )
+ # apply compression and byte/text conversion
+ handles = get_handle(
+ ioargs.filepath_or_buffer, "w", compression=ioargs.compression
+ )
try:
- fh.write(s)
+ handles.handle.write(s)
finally:
- fh.close()
- for handle in handles:
- handle.close()
- elif path_or_buf is None:
- return s
+ # close compression and byte/text wrapper
+ handles.close()
+ # close any fsspec-like objects
+ ioargs.close()
else:
- path_or_buf.write(s)
- if should_close:
- path_or_buf.close()
+ return s
class Writer(ABC):
@@ -545,12 +546,10 @@ def read_json(
dtype = True
if convert_axes is None and orient != "table":
convert_axes = True
- if encoding is None:
- encoding = "utf-8"
ioargs = get_filepath_or_buffer(
path_or_buf,
- encoding=encoding,
+ encoding=encoding or "utf-8",
compression=compression,
storage_options=storage_options,
)
@@ -577,9 +576,7 @@ def read_json(
return json_reader
result = json_reader.read()
- if ioargs.should_close:
- assert not isinstance(ioargs.filepath_or_buffer, str)
- ioargs.filepath_or_buffer.close()
+ ioargs.close()
return result
@@ -629,9 +626,8 @@ def __init__(
self.lines = lines
self.chunksize = chunksize
self.nrows_seen = 0
- self.should_close = False
self.nrows = nrows
- self.file_handles: List[IO] = []
+ self.handles: Optional[IOHandles] = None
if self.chunksize is not None:
self.chunksize = validate_integer("chunksize", self.chunksize, 1)
@@ -670,30 +666,25 @@ def _get_data_from_filepath(self, filepath_or_buffer):
This method turns (1) into (2) to simplify the rest of the processing.
It returns input types (2) and (3) unchanged.
"""
- data = filepath_or_buffer
-
+ # if it is a string but the file does not exist, it might be a JSON string
exists = False
- if isinstance(data, str):
+ if isinstance(filepath_or_buffer, str):
try:
exists = os.path.exists(filepath_or_buffer)
# gh-5874: if the filepath is too long will raise here
except (TypeError, ValueError):
pass
- if exists or self.compression["method"] is not None:
- data, self.file_handles = get_handle(
+ if exists or not isinstance(filepath_or_buffer, str):
+ self.handles = get_handle(
filepath_or_buffer,
"r",
encoding=self.encoding,
compression=self.compression,
)
- self.should_close = True
- self.open_stream = data
-
- if isinstance(data, BytesIO):
- data = data.getvalue().decode()
+ filepath_or_buffer = self.handles.handle
- return data
+ return filepath_or_buffer
def _combine_lines(self, lines) -> str:
"""
@@ -757,13 +748,8 @@ def close(self):
If an open stream or file was passed, we leave it open.
"""
- if self.should_close:
- try:
- self.open_stream.close()
- except (OSError, AttributeError):
- pass
- for file_handle in self.file_handles:
- file_handle.close()
+ if self.handles is not None:
+ self.handles.close()
def __next__(self):
if self.nrows:
diff --git a/pandas/io/orc.py b/pandas/io/orc.py
index 829ff6408d86d..5a734f0878a0c 100644
--- a/pandas/io/orc.py
+++ b/pandas/io/orc.py
@@ -53,4 +53,5 @@ def read_orc(
ioargs = get_filepath_or_buffer(path)
orc_file = pyarrow.orc.ORCFile(ioargs.filepath_or_buffer)
result = orc_file.read(columns=columns, **kwargs).to_pandas()
+ ioargs.close()
return result
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 2110a2d400be8..3b72869188344 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -5,7 +5,7 @@
from collections import abc, defaultdict
import csv
import datetime
-from io import StringIO, TextIOWrapper
+from io import StringIO
import itertools
import re
import sys
@@ -63,7 +63,13 @@
from pandas.core.series import Series
from pandas.core.tools import datetimes as tools
-from pandas.io.common import get_filepath_or_buffer, get_handle, validate_header_arg
+from pandas.io.common import (
+ get_compression_method,
+ get_filepath_or_buffer,
+ get_handle,
+ stringify_path,
+ validate_header_arg,
+)
from pandas.io.date_converters import generic_parser
# BOM character (byte order mark)
@@ -428,17 +434,16 @@ def _validate_names(names):
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
"""Generic reader of line files."""
- encoding = kwds.get("encoding", None)
storage_options = kwds.get("storage_options", None)
- if encoding is not None:
- encoding = re.sub("_", "-", encoding).lower()
- kwds["encoding"] = encoding
- compression = kwds.get("compression", "infer")
ioargs = get_filepath_or_buffer(
- filepath_or_buffer, encoding, compression, storage_options=storage_options
+ filepath_or_buffer,
+ kwds.get("encoding", None),
+ kwds.get("compression", "infer"),
+ storage_options=storage_options,
)
kwds["compression"] = ioargs.compression
+ kwds["encoding"] = ioargs.encoding
if kwds.get("date_parser", None) is not None:
if isinstance(kwds["parse_dates"], bool):
@@ -461,14 +466,10 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
try:
data = parser.read(nrows)
finally:
+ # close compression and byte/text wrapper
parser.close()
-
- if ioargs.should_close:
- assert not isinstance(ioargs.filepath_or_buffer, str)
- try:
- ioargs.filepath_or_buffer.close()
- except ValueError:
- pass
+ # close any fsspec-like objects
+ ioargs.close()
return data
@@ -1350,10 +1351,6 @@ def __init__(self, kwds):
self._first_chunk = True
- # GH 13932
- # keep references to file handles opened by the parser itself
- self.handles = []
-
def _validate_parse_dates_presence(self, columns: List[str]) -> None:
"""
Check if parse_dates are in columns.
@@ -1403,8 +1400,7 @@ def _validate_parse_dates_presence(self, columns: List[str]) -> None:
)
def close(self):
- for f in self.handles:
- f.close()
+ self.handles.close()
@property
def _has_complex_date_col(self):
@@ -1838,23 +1834,29 @@ def __init__(self, src, **kwds):
ParserBase.__init__(self, kwds)
- encoding = kwds.get("encoding")
+ if kwds.get("memory_map", False):
+ # memory-mapped files are directly handled by the TextReader.
+ src = stringify_path(src)
- # parsers.TextReader doesn't support compression dicts
- if isinstance(kwds.get("compression"), dict):
- kwds["compression"] = kwds["compression"]["method"]
-
- if kwds.get("compression") is None and encoding:
- if isinstance(src, str):
- src = open(src, "rb")
- self.handles.append(src)
-
- # Handle the file object with universal line mode enabled.
- # We will handle the newline character ourselves later on.
- if hasattr(src, "read") and not hasattr(src, "encoding"):
- src = TextIOWrapper(src, encoding=encoding, newline="")
+ if get_compression_method(kwds.get("compression", None))[0] is not None:
+ raise ValueError(
+ "read_csv does not support compression with memory_map=True. "
+ + "Please use memory_map=False instead."
+ )
- kwds["encoding"] = "utf-8"
+ self.handles = get_handle(
+ src,
+ mode="r",
+ encoding=kwds.get("encoding", None),
+ compression=kwds.get("compression", None),
+ memory_map=kwds.get("memory_map", False),
+ is_text=True,
+ )
+ kwds.pop("encoding", None)
+ kwds.pop("memory_map", None)
+ kwds.pop("compression", None)
+ if kwds.get("memory_map", False) and hasattr(self.handles.handle, "mmap"):
+ self.handles.handle = self.handles.handle.mmap
# #2442
kwds["allow_leading_cols"] = self.index_col is not False
@@ -1863,7 +1865,7 @@ def __init__(self, src, **kwds):
self.usecols, self.usecols_dtype = _validate_usecols_arg(kwds["usecols"])
kwds["usecols"] = self.usecols
- self._reader = parsers.TextReader(src, **kwds)
+ self._reader = parsers.TextReader(self.handles.handle, **kwds)
self.unnamed_cols = self._reader.unnamed_cols
passed_names = self.names is None
@@ -1942,11 +1944,10 @@ def __init__(self, src, **kwds):
self._implicit_index = self._reader.leading_cols > 0
- def close(self):
- for f in self.handles:
- f.close()
+ def close(self) -> None:
+ super().close()
- # close additional handles opened by C parser (for compression)
+ # close additional handles opened by C parser
try:
self._reader.close()
except ValueError:
@@ -2237,20 +2238,19 @@ def __init__(self, f, **kwds):
self.comment = kwds["comment"]
self._comment_lines = []
- f, handles = get_handle(
+ self.handles = get_handle(
f,
"r",
encoding=self.encoding,
compression=self.compression,
memory_map=self.memory_map,
)
- self.handles.extend(handles)
# Set self.data to something that can read lines.
- if hasattr(f, "readline"):
- self._make_reader(f)
+ if hasattr(self.handles.handle, "readline"):
+ self._make_reader(self.handles.handle)
else:
- self.data = f
+ self.data = self.handles.handle
# Get columns in two steps: infer from data, then
# infer column indices from self.usecols if it is specified.
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py
index 426a40a65b522..6fa044b4651a5 100644
--- a/pandas/io/pickle.py
+++ b/pandas/io/pickle.py
@@ -92,25 +92,18 @@ def to_pickle(
mode="wb",
storage_options=storage_options,
)
- f, fh = get_handle(
+ handles = get_handle(
ioargs.filepath_or_buffer, "wb", compression=ioargs.compression, is_text=False
)
if protocol < 0:
protocol = pickle.HIGHEST_PROTOCOL
try:
- pickle.dump(obj, f, protocol=protocol)
+ pickle.dump(obj, handles.handle, protocol=protocol) # type: ignore[arg-type]
finally:
- if f != filepath_or_buffer:
- # do not close user-provided file objects GH 35679
- f.close()
- for _f in fh:
- _f.close()
- if ioargs.should_close:
- assert not isinstance(ioargs.filepath_or_buffer, str)
- try:
- ioargs.filepath_or_buffer.close()
- except ValueError:
- pass
+ # close compression and byte/text wrapper
+ handles.close()
+ # close any fsspec-like objects
+ ioargs.close()
def read_pickle(
@@ -193,7 +186,7 @@ def read_pickle(
ioargs = get_filepath_or_buffer(
filepath_or_buffer, compression=compression, storage_options=storage_options
)
- f, fh = get_handle(
+ handles = get_handle(
ioargs.filepath_or_buffer, "rb", compression=ioargs.compression, is_text=False
)
@@ -208,24 +201,17 @@ def read_pickle(
with warnings.catch_warnings(record=True):
# We want to silence any warnings about, e.g. moved modules.
warnings.simplefilter("ignore", Warning)
- return pickle.load(f)
+ return pickle.load(handles.handle) # type: ignore[arg-type]
except excs_to_catch:
# e.g.
# "No module named 'pandas.core.sparse.series'"
# "Can't get attribute '__nat_unpickle' on <module 'pandas._libs.tslib"
- return pc.load(f, encoding=None)
+ return pc.load(handles.handle, encoding=None)
except UnicodeDecodeError:
# e.g. can occur for files written in py27; see GH#28645 and GH#31988
- return pc.load(f, encoding="latin-1")
+ return pc.load(handles.handle, encoding="latin-1")
finally:
- if f != filepath_or_buffer:
- # do not close user-provided file objects GH 35679
- f.close()
- for _f in fh:
- _f.close()
- if ioargs.should_close:
- assert not isinstance(ioargs.filepath_or_buffer, str)
- try:
- ioargs.filepath_or_buffer.close()
- except ValueError:
- pass
+ # close compression and byte/text wrapper
+ handles.close()
+ # close any fsspec-like objects
+ ioargs.close()
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index 989036917b265..e9b74199cbc42 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -16,7 +16,7 @@
from collections import abc
from datetime import datetime, timedelta
import struct
-from typing import IO, Any, Union
+from typing import IO, Any, Union, cast
import numpy as np
@@ -131,8 +131,6 @@ class SAS7BDATReader(ReaderBase, abc.Iterator):
bytes.
"""
- _path_or_buf: IO[Any]
-
def __init__(
self,
path_or_buf,
@@ -170,14 +168,12 @@ def __init__(
self._current_row_on_page_index = 0
self._current_row_in_file_index = 0
- path_or_buf = get_filepath_or_buffer(path_or_buf).filepath_or_buffer
- if isinstance(path_or_buf, str):
- buf = open(path_or_buf, "rb")
- self.handle = buf
- else:
- buf = path_or_buf
+ self.ioargs = get_filepath_or_buffer(path_or_buf)
+ if isinstance(self.ioargs.filepath_or_buffer, str):
+ self.ioargs.filepath_or_buffer = open(path_or_buf, "rb")
+ self.ioargs.should_close = True
- self._path_or_buf: IO[Any] = buf
+ self._path_or_buf = cast(IO[Any], self.ioargs.filepath_or_buffer)
try:
self._get_properties()
@@ -202,10 +198,7 @@ def column_types(self):
return np.asarray(self._column_types, dtype=np.dtype("S1"))
def close(self):
- try:
- self.handle.close()
- except AttributeError:
- pass
+ self.ioargs.close()
def _get_properties(self):
diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py
index 2a48abe9fbd63..4303cef2df60d 100644
--- a/pandas/io/sas/sas_xport.py
+++ b/pandas/io/sas/sas_xport.py
@@ -10,6 +10,7 @@
from collections import abc
from datetime import datetime
import struct
+from typing import IO, cast
import warnings
import numpy as np
@@ -252,17 +253,13 @@ def __init__(
self._index = index
self._chunksize = chunksize
- if isinstance(filepath_or_buffer, str):
- filepath_or_buffer = get_filepath_or_buffer(
- filepath_or_buffer, encoding=encoding
- ).filepath_or_buffer
+ self.ioargs = get_filepath_or_buffer(filepath_or_buffer, encoding=encoding)
- if isinstance(filepath_or_buffer, (str, bytes)):
- self.filepath_or_buffer = open(filepath_or_buffer, "rb")
- else:
- # Since xport files include non-text byte sequences, xport files
- # should already be opened in binary mode in Python 3.
- self.filepath_or_buffer = filepath_or_buffer
+ if isinstance(self.ioargs.filepath_or_buffer, str):
+ self.ioargs.filepath_or_buffer = open(self.ioargs.filepath_or_buffer, "rb")
+ self.ioargs.should_close = True
+
+ self.filepath_or_buffer = cast(IO[bytes], self.ioargs.filepath_or_buffer)
try:
self._read_header()
@@ -271,7 +268,7 @@ def __init__(
raise
def close(self):
- self.filepath_or_buffer.close()
+ self.ioargs.close()
def _get_row(self):
return self.filepath_or_buffer.read(80).decode()
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index caf53b5be971a..446e2daaa1f9c 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -139,5 +139,4 @@ def read_sas(
try:
return reader.read()
finally:
- if ioargs.should_close:
- reader.close()
+ ioargs.close()
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index cec73ceb17f09..7c7997f128086 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -16,18 +16,7 @@
from pathlib import Path
import struct
import sys
-from typing import (
- Any,
- AnyStr,
- BinaryIO,
- Dict,
- List,
- Optional,
- Sequence,
- Tuple,
- Union,
- cast,
-)
+from typing import Any, AnyStr, Dict, List, Optional, Sequence, Tuple, Union, cast
import warnings
from dateutil.relativedelta import relativedelta
@@ -35,7 +24,13 @@
from pandas._libs.lib import infer_dtype
from pandas._libs.writers import max_len_string_array
-from pandas._typing import CompressionOptions, FilePathOrBuffer, Label, StorageOptions
+from pandas._typing import (
+ Buffer,
+ CompressionOptions,
+ FilePathOrBuffer,
+ Label,
+ StorageOptions,
+)
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import (
@@ -58,7 +53,12 @@
from pandas.core.indexes.base import Index
from pandas.core.series import Series
-from pandas.io.common import get_filepath_or_buffer, get_handle, stringify_path
+from pandas.io.common import (
+ IOHandles,
+ get_filepath_or_buffer,
+ get_handle,
+ stringify_path,
+)
_version_error = (
"Version of given Stata file is {version}. pandas supports importing "
@@ -1062,19 +1062,20 @@ def __init__(
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
- path_or_buf = stringify_path(path_or_buf)
- if isinstance(path_or_buf, str):
- path_or_buf = get_filepath_or_buffer(
- path_or_buf, storage_options=storage_options
- ).filepath_or_buffer
-
- if isinstance(path_or_buf, (str, bytes)):
- self.path_or_buf = open(path_or_buf, "rb")
+ self.ioargs = get_filepath_or_buffer(
+ path_or_buf, storage_options=storage_options
+ )
+
+ if isinstance(self.ioargs.filepath_or_buffer, (str, bytes)):
+ self.ioargs.filepath_or_buffer = open(self.ioargs.filepath_or_buffer, "rb")
+ self.ioargs.should_close = True
elif hasattr(path_or_buf, "read"):
# Copy to BytesIO, and ensure no encoding
- pb: Any = path_or_buf
- contents = pb.read()
- self.path_or_buf = BytesIO(contents)
+ contents = self.ioargs.filepath_or_buffer.read()
+ self.ioargs.close()
+ self.ioargs.filepath_or_buffer = BytesIO(contents) # type: ignore[arg-type]
+ self.ioargs.should_close = True
+ self.path_or_buf = cast(BytesIO, self.ioargs.filepath_or_buffer)
self._read_header()
self._setup_dtype()
@@ -1089,10 +1090,7 @@ def __exit__(self, exc_type, exc_value, traceback) -> None:
def close(self) -> None:
""" close the handle if its open """
- try:
- self.path_or_buf.close()
- except OSError:
- pass
+ self.ioargs.close()
def _set_encoding(self) -> None:
"""
@@ -1938,7 +1936,7 @@ def _open_file_binary_write(
fname: FilePathOrBuffer,
compression: CompressionOptions,
storage_options: StorageOptions = None,
-) -> Tuple[BinaryIO, bool, CompressionOptions]:
+) -> Tuple[IOHandles, CompressionOptions]:
"""
Open a binary file or no-op if file-like.
@@ -1958,34 +1956,22 @@ def _open_file_binary_write(
docs for the set of allowed keys and values
.. versionadded:: 1.2.0
-
- Returns
- -------
- file : file-like object
- File object supporting write
- own : bool
- True if the file was created, otherwise False
"""
- if hasattr(fname, "write"):
- # See https://github.com/python/mypy/issues/1424 for hasattr challenges
- # error: Incompatible return value type (got "Tuple[Union[str, Path,
- # IO[Any]], bool, None]", expected "Tuple[BinaryIO, bool, Union[str,
- # Mapping[str, str], None]]")
- return fname, False, None # type: ignore[return-value]
- elif isinstance(fname, (str, Path)):
- # Extract compression mode as given, if dict
- ioargs = get_filepath_or_buffer(
- fname, mode="wb", compression=compression, storage_options=storage_options
- )
- f, _ = get_handle(
- ioargs.filepath_or_buffer,
- "wb",
- compression=ioargs.compression,
- is_text=False,
- )
- return f, True, ioargs.compression
- else:
- raise TypeError("fname must be a binary file, buffer or path-like.")
+ ioargs = get_filepath_or_buffer(
+ fname, mode="wb", compression=compression, storage_options=storage_options
+ )
+ handles = get_handle(
+ ioargs.filepath_or_buffer,
+ "wb",
+ compression=ioargs.compression,
+ is_text=False,
+ )
+ if ioargs.filepath_or_buffer != fname and not isinstance(
+ ioargs.filepath_or_buffer, str
+ ):
+ # add handle created by get_filepath_or_buffer
+ handles.created_handles.append(ioargs.filepath_or_buffer)
+ return handles, ioargs.compression
def _set_endianness(endianness: str) -> str:
@@ -2236,9 +2222,8 @@ def __init__(
self._time_stamp = time_stamp
self._data_label = data_label
self._variable_labels = variable_labels
- self._own_file = True
self._compression = compression
- self._output_file: Optional[BinaryIO] = None
+ self._output_file: Optional[Buffer] = None
# attach nobs, nvars, data, varlist, typlist
self._prepare_pandas(data)
self.storage_options = storage_options
@@ -2249,21 +2234,20 @@ def __init__(
self._fname = stringify_path(fname)
self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
self._converted_names: Dict[Label, str] = {}
- self._file: Optional[BinaryIO] = None
def _write(self, to_write: str) -> None:
"""
Helper to call encode before writing to file for Python 3 compat.
"""
- assert self._file is not None
- self._file.write(to_write.encode(self._encoding))
+ self.handles.handle.write(
+ to_write.encode(self._encoding) # type: ignore[arg-type]
+ )
def _write_bytes(self, value: bytes) -> None:
"""
Helper to assert file is open before writing.
"""
- assert self._file is not None
- self._file.write(value)
+ self.handles.handle.write(value) # type: ignore[arg-type]
def _prepare_categoricals(self, data: DataFrame) -> DataFrame:
"""
@@ -2527,12 +2511,14 @@ def _encode_strings(self) -> None:
self.data[col] = encoded
def write_file(self) -> None:
- self._file, self._own_file, compression = _open_file_binary_write(
+ self.handles, compression = _open_file_binary_write(
self._fname, self._compression, storage_options=self.storage_options
)
if compression is not None:
- self._output_file = self._file
- self._file = BytesIO()
+ # ZipFile creates a file (with the same name) for each write call.
+ # Write it first into a buffer and then write the buffer to the ZipFile.
+ self._output_file = self.handles.handle
+ self.handles.handle = BytesIO()
try:
self._write_header(data_label=self._data_label, time_stamp=self._time_stamp)
self._write_map()
@@ -2552,10 +2538,9 @@ def write_file(self) -> None:
self._write_map()
except Exception as exc:
self._close()
- if self._own_file:
+ if isinstance(self._fname, (str, Path)):
try:
- if isinstance(self._fname, (str, Path)):
- os.unlink(self._fname)
+ os.unlink(self._fname)
except OSError:
warnings.warn(
f"This save was not successful but {self._fname} could not "
@@ -2571,24 +2556,18 @@ def _close(self) -> None:
Close the file if it was created by the writer.
If a buffer or file-like object was passed in, for example a GzipFile,
- then leave this file open for the caller to close. In either case,
- attempt to flush the file contents to ensure they are written to disk
- (if supported)
+ then leave this file open for the caller to close.
"""
- # Some file-like objects might not support flush
- assert self._file is not None
+ # write compression
if self._output_file is not None:
- assert isinstance(self._file, BytesIO)
- bio = self._file
+ assert isinstance(self.handles.handle, BytesIO)
+ bio = self.handles.handle
bio.seek(0)
- self._file = self._output_file
- self._file.write(bio.read())
- try:
- self._file.flush()
- except AttributeError:
- pass
- if self._own_file:
- self._file.close()
+ self.handles.handle = self._output_file
+ self.handles.handle.write(bio.read()) # type: ignore[arg-type]
+ bio.close()
+ # close any created handles
+ self.handles.close()
def _write_map(self) -> None:
"""No-op, future compatibility"""
@@ -3140,8 +3119,8 @@ def _tag(val: Union[str, bytes], tag: str) -> bytes:
def _update_map(self, tag: str) -> None:
"""Update map location for tag with file position"""
- assert self._file is not None
- self._map[tag] = self._file.tell()
+ assert self.handles.handle is not None
+ self._map[tag] = self.handles.handle.tell()
def _write_header(
self,
@@ -3208,12 +3187,11 @@ def _write_map(self) -> None:
the map with 0s. The second call writes the final map locations when
all blocks have been written.
"""
- assert self._file is not None
if not self._map:
self._map = dict(
(
("stata_data", 0),
- ("map", self._file.tell()),
+ ("map", self.handles.handle.tell()),
("variable_types", 0),
("varnames", 0),
("sortlist", 0),
@@ -3229,7 +3207,7 @@ def _write_map(self) -> None:
)
)
# Move to start of map
- self._file.seek(self._map["map"])
+ self.handles.handle.seek(self._map["map"])
bio = BytesIO()
for val in self._map.values():
bio.write(struct.pack(self._byteorder + "Q", val))
diff --git a/pandas/tests/frame/methods/test_to_csv.py b/pandas/tests/frame/methods/test_to_csv.py
index 5bf1ce508dfc4..3103f6e1ba0b1 100644
--- a/pandas/tests/frame/methods/test_to_csv.py
+++ b/pandas/tests/frame/methods/test_to_csv.py
@@ -1034,11 +1034,12 @@ def test_to_csv_compression(self, df, encoding, compression):
tm.assert_frame_equal(df, result)
# test the round trip using file handle - to_csv -> read_csv
- f, _handles = get_handle(
+ handles = get_handle(
filename, "w", compression=compression, encoding=encoding
)
- with f:
- df.to_csv(f, encoding=encoding)
+ df.to_csv(handles.handle, encoding=encoding)
+ assert not handles.handle.closed
+ handles.close()
result = pd.read_csv(
filename,
compression=compression,
diff --git a/pandas/tests/io/json/test_readlines.py b/pandas/tests/io/json/test_readlines.py
index 933bdc462e3f8..2e68d3306c7d1 100644
--- a/pandas/tests/io/json/test_readlines.py
+++ b/pandas/tests/io/json/test_readlines.py
@@ -143,7 +143,7 @@ def test_readjson_chunks_closes(chunksize):
)
reader.read()
assert (
- reader.open_stream.closed
+ reader.handles.handle.closed
), f"didn't close stream with chunksize = {chunksize}"
diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py
index b33289213e258..e61a5fce99c69 100644
--- a/pandas/tests/io/parser/test_common.py
+++ b/pandas/tests/io/parser/test_common.py
@@ -6,7 +6,7 @@
import csv
from datetime import datetime
from inspect import signature
-from io import StringIO
+from io import BytesIO, StringIO
import os
import platform
from urllib.error import URLError
@@ -2253,3 +2253,62 @@ def test_dict_keys_as_names(all_parsers):
result = parser.read_csv(StringIO(data), names=keys)
expected = DataFrame({"a": [1], "b": [2]})
tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("io_class", [StringIO, BytesIO])
+@pytest.mark.parametrize("encoding", [None, "utf-8"])
+def test_read_csv_file_handle(all_parsers, io_class, encoding):
+ """
+ Test whether read_csv does not close user-provided file handles.
+
+ GH 36980
+ """
+ parser = all_parsers
+ expected = DataFrame({"a": [1], "b": [2]})
+
+ content = "a,b\n1,2"
+ if io_class == BytesIO:
+ content = content.encode("utf-8")
+ handle = io_class(content)
+
+ tm.assert_frame_equal(parser.read_csv(handle, encoding=encoding), expected)
+ assert not handle.closed
+
+
+def test_memory_map_compression_error(c_parser_only):
+ """
+ c-parsers do not support memory_map=True with compression.
+
+ GH 36997
+ """
+ parser = c_parser_only
+ df = DataFrame({"a": [1], "b": [2]})
+ msg = (
+ "read_csv does not support compression with memory_map=True. "
+ + "Please use memory_map=False instead."
+ )
+
+ with tm.ensure_clean() as path:
+ df.to_csv(path, compression="gzip", index=False)
+
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(path, memory_map=True, compression="gzip")
+
+
+def test_memory_map_file_handle(all_parsers):
+ """
+ Support some buffers with memory_map=True.
+
+ GH 36997
+ """
+ parser = all_parsers
+ expected = DataFrame({"a": [1], "b": [2]})
+
+ handle = StringIO()
+ expected.to_csv(handle, index=False)
+ handle.seek(0)
+
+ tm.assert_frame_equal(
+ parser.read_csv(handle, memory_map=True),
+ expected,
+ )
diff --git a/pandas/tests/io/parser/test_encoding.py b/pandas/tests/io/parser/test_encoding.py
index 876696ecdad9c..e74265da3e966 100644
--- a/pandas/tests/io/parser/test_encoding.py
+++ b/pandas/tests/io/parser/test_encoding.py
@@ -152,14 +152,17 @@ def test_binary_mode_file_buffers(
with open(fpath, mode="r", encoding=encoding) as fa:
result = parser.read_csv(fa)
+ assert not fa.closed
tm.assert_frame_equal(expected, result)
with open(fpath, mode="rb") as fb:
result = parser.read_csv(fb, encoding=encoding)
+ assert not fb.closed
tm.assert_frame_equal(expected, result)
with open(fpath, mode="rb", buffering=0) as fb:
result = parser.read_csv(fb, encoding=encoding)
+ assert not fb.closed
tm.assert_frame_equal(expected, result)
@@ -199,6 +202,7 @@ def test_encoding_named_temp_file(all_parsers):
result = parser.read_csv(f, encoding=encoding)
tm.assert_frame_equal(result, expected)
+ assert not f.closed
@pytest.mark.parametrize(
diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py
index 1c2518646bb29..413b78a52ad38 100644
--- a/pandas/tests/io/parser/test_textreader.py
+++ b/pandas/tests/io/parser/test_textreader.py
@@ -31,13 +31,10 @@ def test_file_handle(self):
reader = TextReader(f)
reader.read()
- def test_string_filename(self):
- reader = TextReader(self.csv1, header=None)
- reader.read()
-
def test_file_handle_mmap(self):
+ # this was never using memory_map=True
with open(self.csv1, "rb") as f:
- reader = TextReader(f, memory_map=True, header=None)
+ reader = TextReader(f, header=None)
reader.read()
def test_StringIO(self):
diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py
index 31e9ad4cf4416..8d7d5d85cbb48 100644
--- a/pandas/tests/io/test_compression.py
+++ b/pandas/tests/io/test_compression.py
@@ -47,18 +47,18 @@ def test_compression_size(obj, method, compression_only):
@pytest.mark.parametrize("method", ["to_csv", "to_json"])
def test_compression_size_fh(obj, method, compression_only):
with tm.ensure_clean() as path:
- f, handles = icom.get_handle(path, "w", compression=compression_only)
- with f:
- getattr(obj, method)(f)
- assert not f.closed
- assert f.closed
+ handles = icom.get_handle(path, "w", compression=compression_only)
+ getattr(obj, method)(handles.handle)
+ assert not handles.handle.closed
+ handles.close()
+ assert handles.handle.closed
compressed_size = os.path.getsize(path)
with tm.ensure_clean() as path:
- f, handles = icom.get_handle(path, "w", compression=None)
- with f:
- getattr(obj, method)(f)
- assert not f.closed
- assert f.closed
+ handles = icom.get_handle(path, "w", compression=None)
+ getattr(obj, method)(handles.handle)
+ assert not handles.handle.closed
+ handles.close()
+ assert handles.handle.closed
uncompressed_size = os.path.getsize(path)
assert uncompressed_size > compressed_size
@@ -111,10 +111,10 @@ def test_compression_warning(compression_only):
columns=["X", "Y", "Z"],
)
with tm.ensure_clean() as path:
- f, handles = icom.get_handle(path, "w", compression=compression_only)
+ handles = icom.get_handle(path, "w", compression=compression_only)
with tm.assert_produces_warning(RuntimeWarning, check_stacklevel=False):
- with f:
- df.to_csv(f, compression=compression_only)
+ df.to_csv(handles.handle, compression=compression_only)
+ handles.close()
def test_compression_binary(compression_only):
diff --git a/pandas/tests/series/methods/test_to_csv.py b/pandas/tests/series/methods/test_to_csv.py
index a72e860340f25..714173158f4d6 100644
--- a/pandas/tests/series/methods/test_to_csv.py
+++ b/pandas/tests/series/methods/test_to_csv.py
@@ -143,11 +143,11 @@ def test_to_csv_compression(self, s, encoding, compression):
tm.assert_series_equal(s, result)
# test the round trip using file handle - to_csv -> read_csv
- f, _handles = get_handle(
+ handles = get_handle(
filename, "w", compression=compression, encoding=encoding
)
- with f:
- s.to_csv(f, encoding=encoding, header=True)
+ s.to_csv(handles.handle, encoding=encoding, header=True)
+ handles.close()
result = pd.read_csv(
filename,
compression=compression,
| - [x] closes #36980
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
REF/BUG: de-duplicate all file handling in `TextReader` by calling `get_handle` in `CParserWrapper`. ~~When `TextReader` gets a string it uses memory mapping (it is given a file object in all other cases).~~
REF/TYP: The second commit adds a new return value to `get_handle` (whether the buffer is wrapped inside a TextIOWrapper: in that case we cannot close it, we need to detach it (and flush it if we wrote to it)). I made `get_handle` return a typed dataclass `HandleArgs` and made sure that all created handles are in `HandleArgs.created_handles` there is no need to close `HandleArgs.handle` (unless it is created by `get_filename_or_buffer`).
I used asserts for mypy when I'm 100% certain about the type, otherwise I added mypy ignore statements.
In the future it might be good to merge `get_handle` and `get_filename_or_buffer`. | https://api.github.com/repos/pandas-dev/pandas/pulls/36997 | 2020-10-09T01:04:08Z | 2020-11-04T04:09:17Z | 2020-11-04T04:09:17Z | 2020-11-04T04:13:30Z |
TYP: IntervalIndex.SetopCheck | diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 93117fbc22752..cc47740dba5f2 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -1,4 +1,5 @@
""" define the IntervalIndex """
+from functools import wraps
from operator import le, lt
import textwrap
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union, cast
@@ -112,43 +113,41 @@ def _new_IntervalIndex(cls, d):
return cls.from_arrays(**d)
-class SetopCheck:
+def setop_check(method):
"""
This is called to decorate the set operations of IntervalIndex
to perform the type check in advance.
"""
+ op_name = method.__name__
- def __init__(self, op_name):
- self.op_name = op_name
-
- def __call__(self, setop):
- def func(intvidx_self, other, sort=False):
- intvidx_self._assert_can_do_setop(other)
- other = ensure_index(other)
-
- if not isinstance(other, IntervalIndex):
- result = getattr(intvidx_self.astype(object), self.op_name)(other)
- if self.op_name in ("difference",):
- result = result.astype(intvidx_self.dtype)
- return result
- elif intvidx_self.closed != other.closed:
- raise ValueError(
- "can only do set operations between two IntervalIndex "
- "objects that are closed on the same side"
- )
+ @wraps(method)
+ def wrapped(self, other, sort=False):
+ self._assert_can_do_setop(other)
+ other = ensure_index(other)
- # GH 19016: ensure set op will not return a prohibited dtype
- subtypes = [intvidx_self.dtype.subtype, other.dtype.subtype]
- common_subtype = find_common_type(subtypes)
- if is_object_dtype(common_subtype):
- raise TypeError(
- f"can only do {self.op_name} between two IntervalIndex "
- "objects that have compatible dtypes"
- )
+ if not isinstance(other, IntervalIndex):
+ result = getattr(self.astype(object), op_name)(other)
+ if op_name in ("difference",):
+ result = result.astype(self.dtype)
+ return result
+ elif self.closed != other.closed:
+ raise ValueError(
+ "can only do set operations between two IntervalIndex "
+ "objects that are closed on the same side"
+ )
+
+ # GH 19016: ensure set op will not return a prohibited dtype
+ subtypes = [self.dtype.subtype, other.dtype.subtype]
+ common_subtype = find_common_type(subtypes)
+ if is_object_dtype(common_subtype):
+ raise TypeError(
+ f"can only do {op_name} between two IntervalIndex "
+ "objects that have compatible dtypes"
+ )
- return setop(intvidx_self, other, sort)
+ return method(self, other, sort)
- return func
+ return wrapped
@Appender(
@@ -1006,7 +1005,7 @@ def equals(self, other: object) -> bool:
# Set Operations
@Appender(Index.intersection.__doc__)
- @SetopCheck(op_name="intersection")
+ @setop_check
def intersection(
self, other: "IntervalIndex", sort: bool = False
) -> "IntervalIndex":
@@ -1075,7 +1074,6 @@ def _intersection_non_unique(self, other: "IntervalIndex") -> "IntervalIndex":
return self[mask]
def _setop(op_name: str, sort=None):
- @SetopCheck(op_name=op_name)
def func(self, other, sort=sort):
result = getattr(self._multiindex, op_name)(other._multiindex, sort=sort)
result_name = get_op_result_name(self, other)
@@ -1088,7 +1086,8 @@ def func(self, other, sort=sort):
return type(self).from_tuples(result, closed=self.closed, name=result_name)
- return func
+ func.__name__ = op_name
+ return setop_check(func)
union = _setop("union")
difference = _setop("difference")
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Moves us closer to #33455 | https://api.github.com/repos/pandas-dev/pandas/pulls/36995 | 2020-10-08T22:47:56Z | 2020-10-10T17:43:05Z | 2020-10-10T17:43:05Z | 2020-10-10T18:07:29Z |
TYP/REF: use OpsMixin for arithmetic methods | diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py
index 185e9197e01fe..553649212aa5f 100644
--- a/pandas/core/arraylike.py
+++ b/pandas/core/arraylike.py
@@ -72,3 +72,73 @@ def __xor__(self, other):
@unpack_zerodim_and_defer("__rxor__")
def __rxor__(self, other):
return self._logical_method(other, roperator.rxor)
+
+ # -------------------------------------------------------------
+ # Arithmetic Methods
+
+ def _arith_method(self, other, op):
+ return NotImplemented
+
+ @unpack_zerodim_and_defer("__add__")
+ def __add__(self, other):
+ return self._arith_method(other, operator.add)
+
+ @unpack_zerodim_and_defer("__radd__")
+ def __radd__(self, other):
+ return self._arith_method(other, roperator.radd)
+
+ @unpack_zerodim_and_defer("__sub__")
+ def __sub__(self, other):
+ return self._arith_method(other, operator.sub)
+
+ @unpack_zerodim_and_defer("__rsub__")
+ def __rsub__(self, other):
+ return self._arith_method(other, roperator.rsub)
+
+ @unpack_zerodim_and_defer("__mul__")
+ def __mul__(self, other):
+ return self._arith_method(other, operator.mul)
+
+ @unpack_zerodim_and_defer("__rmul__")
+ def __rmul__(self, other):
+ return self._arith_method(other, roperator.rmul)
+
+ @unpack_zerodim_and_defer("__truediv__")
+ def __truediv__(self, other):
+ return self._arith_method(other, operator.truediv)
+
+ @unpack_zerodim_and_defer("__rtruediv__")
+ def __rtruediv__(self, other):
+ return self._arith_method(other, roperator.rtruediv)
+
+ @unpack_zerodim_and_defer("__floordiv__")
+ def __floordiv__(self, other):
+ return self._arith_method(other, operator.floordiv)
+
+ @unpack_zerodim_and_defer("__rfloordiv")
+ def __rfloordiv__(self, other):
+ return self._arith_method(other, roperator.rfloordiv)
+
+ @unpack_zerodim_and_defer("__mod__")
+ def __mod__(self, other):
+ return self._arith_method(other, operator.mod)
+
+ @unpack_zerodim_and_defer("__rmod__")
+ def __rmod__(self, other):
+ return self._arith_method(other, roperator.rmod)
+
+ @unpack_zerodim_and_defer("__divmod__")
+ def __divmod__(self, other):
+ return self._arith_method(other, divmod)
+
+ @unpack_zerodim_and_defer("__rdivmod__")
+ def __rdivmod__(self, other):
+ return self._arith_method(other, roperator.rdivmod)
+
+ @unpack_zerodim_and_defer("__pow__")
+ def __pow__(self, other):
+ return self._arith_method(other, operator.pow)
+
+ @unpack_zerodim_and_defer("__rpow__")
+ def __rpow__(self, other):
+ return self._arith_method(other, roperator.rpow)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 31a81c544a5b3..7ae36344a9966 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -5408,29 +5408,17 @@ def _cmp_method(self, other, op):
return result
return ops.invalid_comparison(self, other, op)
- @classmethod
- def _add_numeric_methods_binary(cls):
+ def _arith_method(self, other, op):
"""
- Add in numeric methods.
+ Wrapper used to dispatch arithmetic operations.
"""
- setattr(cls, "__add__", _make_arithmetic_op(operator.add, cls))
- setattr(cls, "__radd__", _make_arithmetic_op(ops.radd, cls))
- setattr(cls, "__sub__", _make_arithmetic_op(operator.sub, cls))
- setattr(cls, "__rsub__", _make_arithmetic_op(ops.rsub, cls))
- setattr(cls, "__rpow__", _make_arithmetic_op(ops.rpow, cls))
- setattr(cls, "__pow__", _make_arithmetic_op(operator.pow, cls))
- setattr(cls, "__truediv__", _make_arithmetic_op(operator.truediv, cls))
- setattr(cls, "__rtruediv__", _make_arithmetic_op(ops.rtruediv, cls))
+ from pandas import Series
- setattr(cls, "__mod__", _make_arithmetic_op(operator.mod, cls))
- setattr(cls, "__rmod__", _make_arithmetic_op(ops.rmod, cls))
- setattr(cls, "__floordiv__", _make_arithmetic_op(operator.floordiv, cls))
- setattr(cls, "__rfloordiv__", _make_arithmetic_op(ops.rfloordiv, cls))
- setattr(cls, "__divmod__", _make_arithmetic_op(divmod, cls))
- setattr(cls, "__rdivmod__", _make_arithmetic_op(ops.rdivmod, cls))
- setattr(cls, "__mul__", _make_arithmetic_op(operator.mul, cls))
- setattr(cls, "__rmul__", _make_arithmetic_op(ops.rmul, cls))
+ result = op(Series(self), other)
+ if isinstance(result, tuple):
+ return (Index(result[0]), Index(result[1]))
+ return Index(result)
@classmethod
def _add_numeric_methods_unary(cls):
@@ -5455,7 +5443,6 @@ def _evaluate_numeric_unary(self):
@classmethod
def _add_numeric_methods(cls):
cls._add_numeric_methods_unary()
- cls._add_numeric_methods_binary()
def any(self, *args, **kwargs):
"""
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index 4a6bb11bda400..14098ddadb8e2 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -811,16 +811,13 @@ def any(self, *args, **kwargs) -> bool:
# --------------------------------------------------------------------
- def _arith_method(self, other, op, step=False):
+ def _arith_method(self, other, op):
"""
Parameters
----------
other : Any
op : callable that accepts 2 params
perform the binary op
- step : callable, optional, default to False
- op to apply to the step parm if not None
- if False, use the existing step
"""
if isinstance(other, ABCTimedeltaIndex):
@@ -834,6 +831,21 @@ def _arith_method(self, other, op, step=False):
# Must be an np.ndarray; GH#22390
return op(self._int64index, other)
+ if op in [
+ operator.pow,
+ ops.rpow,
+ operator.mod,
+ ops.rmod,
+ ops.rfloordiv,
+ divmod,
+ ops.rdivmod,
+ ]:
+ return op(self._int64index, other)
+
+ step = False
+ if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]:
+ step = op
+
other = extract_array(other, extract_numpy=True)
attrs = self._get_attributes_dict()
@@ -871,35 +883,3 @@ def _arith_method(self, other, op, step=False):
# Defer to Int64Index implementation
return op(self._int64index, other)
# TODO: Do attrs get handled reliably?
-
- @unpack_zerodim_and_defer("__add__")
- def __add__(self, other):
- return self._arith_method(other, operator.add)
-
- @unpack_zerodim_and_defer("__radd__")
- def __radd__(self, other):
- return self._arith_method(other, ops.radd)
-
- @unpack_zerodim_and_defer("__sub__")
- def __sub__(self, other):
- return self._arith_method(other, operator.sub)
-
- @unpack_zerodim_and_defer("__rsub__")
- def __rsub__(self, other):
- return self._arith_method(other, ops.rsub)
-
- @unpack_zerodim_and_defer("__mul__")
- def __mul__(self, other):
- return self._arith_method(other, operator.mul, step=operator.mul)
-
- @unpack_zerodim_and_defer("__rmul__")
- def __rmul__(self, other):
- return self._arith_method(other, ops.rmul, step=ops.rmul)
-
- @unpack_zerodim_and_defer("__truediv__")
- def __truediv__(self, other):
- return self._arith_method(other, operator.truediv, step=operator.truediv)
-
- @unpack_zerodim_and_defer("__rtruediv__")
- def __rtruediv__(self, other):
- return self._arith_method(other, ops.rtruediv, step=ops.rtruediv)
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py
index ae21f13ea3f49..0de842e8575af 100644
--- a/pandas/core/ops/__init__.py
+++ b/pandas/core/ops/__init__.py
@@ -19,7 +19,6 @@
from pandas.core.dtypes.missing import isna
from pandas.core import algorithms
-from pandas.core.construction import extract_array
from pandas.core.ops.array_ops import ( # noqa:F401
arithmetic_op,
comp_method_OBJECT_ARRAY,
@@ -27,7 +26,7 @@
get_array_op,
logical_op,
)
-from pandas.core.ops.common import unpack_zerodim_and_defer
+from pandas.core.ops.common import unpack_zerodim_and_defer # noqa:F401
from pandas.core.ops.docstrings import (
_arith_doc_FRAME,
_flex_comp_doc_FRAME,
@@ -300,29 +299,6 @@ def align_method_SERIES(left: "Series", right, align_asobject: bool = False):
return left, right
-def arith_method_SERIES(cls, op, special):
- """
- Wrapper function for Series arithmetic operations, to avoid
- code duplication.
- """
- assert special # non-special uses flex_method_SERIES
- op_name = _get_op_name(op, special)
-
- @unpack_zerodim_and_defer(op_name)
- def wrapper(left, right):
- res_name = get_op_result_name(left, right)
- left, right = align_method_SERIES(left, right)
-
- lvalues = extract_array(left, extract_numpy=True)
- rvalues = extract_array(right, extract_numpy=True)
- result = arithmetic_op(lvalues, rvalues, op)
-
- return left._construct_result(result, name=res_name)
-
- wrapper.__name__ = op_name
- return wrapper
-
-
def flex_method_SERIES(cls, op, special):
assert not special # "special" also means "not flex"
name = _get_op_name(op, special)
diff --git a/pandas/core/ops/methods.py b/pandas/core/ops/methods.py
index 70fd814423c7f..05da378f8964d 100644
--- a/pandas/core/ops/methods.py
+++ b/pandas/core/ops/methods.py
@@ -45,7 +45,6 @@ def _get_method_wrappers(cls):
# are no longer in __init__
from pandas.core.ops import (
arith_method_FRAME,
- arith_method_SERIES,
comp_method_FRAME,
flex_comp_method_FRAME,
flex_method_SERIES,
@@ -55,7 +54,7 @@ def _get_method_wrappers(cls):
# Just Series
arith_flex = flex_method_SERIES
comp_flex = flex_method_SERIES
- arith_special = arith_method_SERIES
+ arith_special = None
comp_special = None
bool_special = None
elif issubclass(cls, ABCDataFrame):
@@ -105,20 +104,19 @@ def f(self, other):
f.__name__ = f"__i{name}__"
return f
- new_methods.update(
- dict(
- __iadd__=_wrap_inplace_method(new_methods["__add__"]),
- __isub__=_wrap_inplace_method(new_methods["__sub__"]),
- __imul__=_wrap_inplace_method(new_methods["__mul__"]),
- __itruediv__=_wrap_inplace_method(new_methods["__truediv__"]),
- __ifloordiv__=_wrap_inplace_method(new_methods["__floordiv__"]),
- __imod__=_wrap_inplace_method(new_methods["__mod__"]),
- __ipow__=_wrap_inplace_method(new_methods["__pow__"]),
- )
- )
-
if bool_method is None:
- # Series gets bool_method via OpsMixin
+ # Series gets bool_method, arith_method via OpsMixin
+ new_methods.update(
+ dict(
+ __iadd__=_wrap_inplace_method(cls.__add__),
+ __isub__=_wrap_inplace_method(cls.__sub__),
+ __imul__=_wrap_inplace_method(cls.__mul__),
+ __itruediv__=_wrap_inplace_method(cls.__truediv__),
+ __ifloordiv__=_wrap_inplace_method(cls.__floordiv__),
+ __imod__=_wrap_inplace_method(cls.__mod__),
+ __ipow__=_wrap_inplace_method(cls.__pow__),
+ )
+ )
new_methods.update(
dict(
__iand__=_wrap_inplace_method(cls.__and__),
@@ -127,6 +125,17 @@ def f(self, other):
)
)
else:
+ new_methods.update(
+ dict(
+ __iadd__=_wrap_inplace_method(new_methods["__add__"]),
+ __isub__=_wrap_inplace_method(new_methods["__sub__"]),
+ __imul__=_wrap_inplace_method(new_methods["__mul__"]),
+ __itruediv__=_wrap_inplace_method(new_methods["__truediv__"]),
+ __ifloordiv__=_wrap_inplace_method(new_methods["__floordiv__"]),
+ __imod__=_wrap_inplace_method(new_methods["__mod__"]),
+ __ipow__=_wrap_inplace_method(new_methods["__pow__"]),
+ )
+ )
new_methods.update(
dict(
__iand__=_wrap_inplace_method(new_methods["__and__"]),
@@ -172,30 +181,34 @@ def _create_methods(cls, arith_method, comp_method, bool_method, special):
have_divmod = issubclass(cls, ABCSeries)
# divmod is available for Series
- new_methods = dict(
- add=arith_method(cls, operator.add, special),
- radd=arith_method(cls, radd, special),
- sub=arith_method(cls, operator.sub, special),
- mul=arith_method(cls, operator.mul, special),
- truediv=arith_method(cls, operator.truediv, special),
- floordiv=arith_method(cls, operator.floordiv, special),
- mod=arith_method(cls, operator.mod, special),
- pow=arith_method(cls, operator.pow, special),
- # not entirely sure why this is necessary, but previously was included
- # so it's here to maintain compatibility
- rmul=arith_method(cls, rmul, special),
- rsub=arith_method(cls, rsub, special),
- rtruediv=arith_method(cls, rtruediv, special),
- rfloordiv=arith_method(cls, rfloordiv, special),
- rpow=arith_method(cls, rpow, special),
- rmod=arith_method(cls, rmod, special),
- )
- new_methods["div"] = new_methods["truediv"]
- new_methods["rdiv"] = new_methods["rtruediv"]
- if have_divmod:
- # divmod doesn't have an op that is supported by numexpr
- new_methods["divmod"] = arith_method(cls, divmod, special)
- new_methods["rdivmod"] = arith_method(cls, rdivmod, special)
+ new_methods = {}
+ if arith_method is not None:
+ new_methods.update(
+ dict(
+ add=arith_method(cls, operator.add, special),
+ radd=arith_method(cls, radd, special),
+ sub=arith_method(cls, operator.sub, special),
+ mul=arith_method(cls, operator.mul, special),
+ truediv=arith_method(cls, operator.truediv, special),
+ floordiv=arith_method(cls, operator.floordiv, special),
+ mod=arith_method(cls, operator.mod, special),
+ pow=arith_method(cls, operator.pow, special),
+ # not entirely sure why this is necessary, but previously was included
+ # so it's here to maintain compatibility
+ rmul=arith_method(cls, rmul, special),
+ rsub=arith_method(cls, rsub, special),
+ rtruediv=arith_method(cls, rtruediv, special),
+ rfloordiv=arith_method(cls, rfloordiv, special),
+ rpow=arith_method(cls, rpow, special),
+ rmod=arith_method(cls, rmod, special),
+ )
+ )
+ new_methods["div"] = new_methods["truediv"]
+ new_methods["rdiv"] = new_methods["rtruediv"]
+ if have_divmod:
+ # divmod doesn't have an op that is supported by numexpr
+ new_methods["divmod"] = arith_method(cls, divmod, special)
+ new_methods["rdivmod"] = arith_method(cls, rdivmod, special)
if comp_method is not None:
# Series already has this pinned
@@ -210,7 +223,7 @@ def _create_methods(cls, arith_method, comp_method, bool_method, special):
)
)
- if bool_method:
+ if bool_method is not None:
new_methods.update(
dict(
and_=bool_method(cls, operator.and_, special),
diff --git a/pandas/core/series.py b/pandas/core/series.py
index a2a6023bf4626..bec4445ecac83 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -4990,6 +4990,24 @@ def _logical_method(self, other, op):
res_values = ops.logical_op(lvalues, rvalues, op)
return self._construct_result(res_values, name=res_name)
+ def _arith_method(self, other, op):
+ res_name = ops.get_op_result_name(self, other)
+ self, other = ops.align_method_SERIES(self, other)
+
+ lvalues = extract_array(self, extract_numpy=True)
+ rvalues = extract_array(other, extract_numpy=True)
+ result = ops.arithmetic_op(lvalues, rvalues, op)
+
+ return self._construct_result(result, name=res_name)
+
+ def __div__(self, other):
+ # Alias for backward compat
+ return self.__truediv__(other)
+
+ def __rdiv__(self, other):
+ # Alias for backward compat
+ return self.__rtruediv__(other)
+
Series._add_numeric_operations()
diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py
index b3dfb5d015ab4..3e979aed0551f 100644
--- a/pandas/tests/arithmetic/test_timedelta64.py
+++ b/pandas/tests/arithmetic/test_timedelta64.py
@@ -2159,7 +2159,7 @@ def test_float_series_rdiv_td64arr(self, box_with_array, names):
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
- result = ser.__rdiv__(tdi)
+ result = ser.__rtruediv__(tdi)
if box is pd.DataFrame:
# TODO: Should we skip this case sooner or test something else?
assert result is NotImplemented
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
We'll be able to use this in DataFrame too following #36843 | https://api.github.com/repos/pandas-dev/pandas/pulls/36994 | 2020-10-08T22:45:38Z | 2020-10-10T17:41:39Z | 2020-10-10T17:41:39Z | 2020-10-10T20:04:36Z |
CI: xfail intermittently-failing tests | diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index 822342113f62a..6abd8a010ea69 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -8,7 +8,7 @@
import numpy as np
import pytest
-from pandas.compat import IS64, is_platform_windows
+from pandas.compat import IS64, PY38, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
@@ -1695,6 +1695,11 @@ def test_json_multiindex(self, dataframe, expected):
result = series.to_json(orient="index")
assert result == expected
+ @pytest.mark.xfail(
+ is_platform_windows() and PY38,
+ reason="localhost connection rejected",
+ strict=False,
+ )
def test_to_s3(self, s3_resource, s3so):
import time
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 9114edc19315f..67ee9348394dd 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -9,7 +9,7 @@
import numpy as np
import pytest
-from pandas.compat import PY38
+from pandas.compat import PY38, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
@@ -559,6 +559,11 @@ def test_categorical(self, pa):
expected = df.astype(object)
check_round_trip(df, pa, expected=expected)
+ @pytest.mark.xfail(
+ is_platform_windows() and PY38,
+ reason="localhost connection rejected",
+ strict=False,
+ )
def test_s3_roundtrip_explicit_fs(self, df_compat, s3_resource, pa, s3so):
s3fs = pytest.importorskip("s3fs")
if LooseVersion(pyarrow.__version__) <= LooseVersion("0.17.0"):
diff --git a/pandas/tests/plotting/test_groupby.py b/pandas/tests/plotting/test_groupby.py
index 4ac23c2cffa15..805a284c8f863 100644
--- a/pandas/tests/plotting/test_groupby.py
+++ b/pandas/tests/plotting/test_groupby.py
@@ -4,6 +4,7 @@
import numpy as np
import pytest
+from pandas.compat import PY38, is_platform_windows
import pandas.util._test_decorators as td
from pandas import DataFrame, Index, Series
@@ -13,6 +14,11 @@
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
+ @pytest.mark.xfail(
+ is_platform_windows() and not PY38,
+ reason="Looks like LinePlot._is_ts_plot is wrong",
+ strict=False,
+ )
def test_series_groupby_plotting_nominally_works(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Usually I avoid `strict=False` like the plague, but these are a PITA recently. | https://api.github.com/repos/pandas-dev/pandas/pulls/36993 | 2020-10-08T21:38:27Z | 2020-10-08T23:44:11Z | 2020-10-08T23:44:11Z | 2020-10-08T23:44:22Z |
Backport PR #36946 on branch 1.1.x (REGR: Make DateOffset immutable) | diff --git a/doc/source/whatsnew/v1.1.4.rst b/doc/source/whatsnew/v1.1.4.rst
index e63912ebc8fee..d0d03021629c6 100644
--- a/doc/source/whatsnew/v1.1.4.rst
+++ b/doc/source/whatsnew/v1.1.4.rst
@@ -14,7 +14,7 @@ including other versions of pandas.
Fixed regressions
~~~~~~~~~~~~~~~~~
--
+- Fixed regression where attempting to mutate a :class:`DateOffset` object would no longer raise an ``AttributeError`` (:issue:`36940`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index ac2725fc58aee..a85dfc36b97fa 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -1209,9 +1209,8 @@ class DateOffset(RelativeDeltaOffset, metaclass=OffsetMeta):
>>> ts + DateOffset(months=2)
Timestamp('2017-03-01 09:10:11')
"""
-
- pass
-
+ def __setattr__(self, name, value):
+ raise AttributeError("DateOffset objects are immutable.")
# --------------------------------------------------------------------
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index 8c51908c547f4..67cbbac47bc68 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -4438,3 +4438,20 @@ def test_week_add_invalid():
other = Day()
with pytest.raises(TypeError, match="Cannot add"):
offset + other
+
+
+@pytest.mark.parametrize(
+ "attribute",
+ [
+ "hours",
+ "days",
+ "weeks",
+ "months",
+ "years",
+ ],
+)
+def test_dateoffset_immutable(attribute):
+ offset = DateOffset(**{attribute: 0})
+ msg = "DateOffset objects are immutable"
+ with pytest.raises(AttributeError, match=msg):
+ setattr(offset, attribute, 5)
| Backport PR #36946: REGR: Make DateOffset immutable | https://api.github.com/repos/pandas-dev/pandas/pulls/36992 | 2020-10-08T21:11:31Z | 2020-10-09T11:20:34Z | 2020-10-09T11:20:34Z | 2020-10-09T11:20:34Z |
TYP: sas, stata, style | diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 13010bb2ef147..3e4780ec21378 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -1407,6 +1407,7 @@ def _value_formatter(
if float_format:
def base_formatter(v):
+ assert float_format is not None # for mypy
return float_format(value=v) if notna(v) else self.na_rep
else:
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 1df37da3da8d0..0089d7a32f723 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -1511,7 +1511,10 @@ def from_custom_template(cls, searchpath, name):
"""
loader = jinja2.ChoiceLoader([jinja2.FileSystemLoader(searchpath), cls.loader])
- class MyStyler(cls):
+ # mypy doesnt like dynamically-defined class
+ # error: Variable "cls" is not valid as a type [valid-type]
+ # error: Invalid base class "cls" [misc]
+ class MyStyler(cls): # type:ignore[valid-type,misc]
env = jinja2.Environment(loader=loader)
template = env.get_template(name)
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index f2ee642d8fd42..989036917b265 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -16,6 +16,7 @@
from collections import abc
from datetime import datetime, timedelta
import struct
+from typing import IO, Any, Union
import numpy as np
@@ -62,12 +63,42 @@ def _convert_datetimes(sas_datetimes: pd.Series, unit: str) -> pd.Series:
raise ValueError("unit must be 'd' or 's'")
-class _subheader_pointer:
- pass
+class _SubheaderPointer:
+ offset: int
+ length: int
+ compression: int
+ ptype: int
+ def __init__(self, offset: int, length: int, compression: int, ptype: int):
+ self.offset = offset
+ self.length = length
+ self.compression = compression
+ self.ptype = ptype
-class _column:
- pass
+
+class _Column:
+ col_id: int
+ name: Union[str, bytes]
+ label: Union[str, bytes]
+ format: Union[str, bytes] # TODO: i think allowing bytes is from py2 days
+ ctype: bytes
+ length: int
+
+ def __init__(
+ self,
+ col_id: int,
+ name: Union[str, bytes],
+ label: Union[str, bytes],
+ format: Union[str, bytes],
+ ctype: bytes,
+ length: int,
+ ):
+ self.col_id = col_id
+ self.name = name
+ self.label = label
+ self.format = format
+ self.ctype = ctype
+ self.length = length
# SAS7BDAT represents a SAS data file in SAS7BDAT format.
@@ -100,6 +131,8 @@ class SAS7BDATReader(ReaderBase, abc.Iterator):
bytes.
"""
+ _path_or_buf: IO[Any]
+
def __init__(
self,
path_or_buf,
@@ -121,7 +154,7 @@ def __init__(
self.convert_header_text = convert_header_text
self.default_encoding = "latin-1"
- self.compression = ""
+ self.compression = b""
self.column_names_strings = []
self.column_names = []
self.column_formats = []
@@ -137,10 +170,14 @@ def __init__(
self._current_row_on_page_index = 0
self._current_row_in_file_index = 0
- self._path_or_buf = get_filepath_or_buffer(path_or_buf).filepath_or_buffer
- if isinstance(self._path_or_buf, str):
- self._path_or_buf = open(self._path_or_buf, "rb")
- self.handle = self._path_or_buf
+ path_or_buf = get_filepath_or_buffer(path_or_buf).filepath_or_buffer
+ if isinstance(path_or_buf, str):
+ buf = open(path_or_buf, "rb")
+ self.handle = buf
+ else:
+ buf = path_or_buf
+
+ self._path_or_buf: IO[Any] = buf
try:
self._get_properties()
@@ -319,7 +356,7 @@ def _read_float(self, offset, width):
return struct.unpack(self.byte_order + fd, buf)[0]
# Read a single signed integer of the given width (1, 2, 4 or 8).
- def _read_int(self, offset, width):
+ def _read_int(self, offset: int, width: int) -> int:
if width not in (1, 2, 4, 8):
self.close()
raise ValueError("invalid int width")
@@ -328,7 +365,7 @@ def _read_int(self, offset, width):
iv = struct.unpack(self.byte_order + it, buf)[0]
return iv
- def _read_bytes(self, offset, length):
+ def _read_bytes(self, offset: int, length: int):
if self._cached_page is None:
self._path_or_buf.seek(offset)
buf = self._path_or_buf.read(length)
@@ -400,14 +437,14 @@ def _get_subheader_index(self, signature, compression, ptype):
if index is None:
f1 = (compression == const.compressed_subheader_id) or (compression == 0)
f2 = ptype == const.compressed_subheader_type
- if (self.compression != "") and f1 and f2:
+ if (self.compression != b"") and f1 and f2:
index = const.SASIndex.data_subheader_index
else:
self.close()
raise ValueError("Unknown subheader signature")
return index
- def _process_subheader_pointers(self, offset, subheader_pointer_index):
+ def _process_subheader_pointers(self, offset: int, subheader_pointer_index: int):
subheader_pointer_length = self._subheader_pointer_length
total_offset = offset + subheader_pointer_length * subheader_pointer_index
@@ -423,11 +460,9 @@ def _process_subheader_pointers(self, offset, subheader_pointer_index):
subheader_type = self._read_int(total_offset, 1)
- x = _subheader_pointer()
- x.offset = subheader_offset
- x.length = subheader_length
- x.compression = subheader_compression
- x.ptype = subheader_type
+ x = _SubheaderPointer(
+ subheader_offset, subheader_length, subheader_compression, subheader_type
+ )
return x
@@ -519,7 +554,7 @@ def _process_columntext_subheader(self, offset, length):
self.column_names_strings.append(cname)
if len(self.column_names_strings) == 1:
- compression_literal = ""
+ compression_literal = b""
for cl in const.compression_literals:
if cl in cname_raw:
compression_literal = cl
@@ -532,7 +567,7 @@ def _process_columntext_subheader(self, offset, length):
buf = self._read_bytes(offset1, self._lcp)
compression_literal = buf.rstrip(b"\x00")
- if compression_literal == "":
+ if compression_literal == b"":
self._lcs = 0
offset1 = offset + 32
if self.U64:
@@ -657,13 +692,14 @@ def _process_format_subheader(self, offset, length):
column_format = format_names[format_start : format_start + format_len]
current_column_number = len(self.columns)
- col = _column()
- col.col_id = current_column_number
- col.name = self.column_names[current_column_number]
- col.label = column_label
- col.format = column_format
- col.ctype = self._column_types[current_column_number]
- col.length = self._column_data_lengths[current_column_number]
+ col = _Column(
+ current_column_number,
+ self.column_names[current_column_number],
+ column_label,
+ column_format,
+ self._column_types[current_column_number],
+ self._column_data_lengths[current_column_number],
+ )
self.column_formats.append(column_format)
self.columns.append(col)
diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py
index 9727ec930119b..2a48abe9fbd63 100644
--- a/pandas/io/sas/sas_xport.py
+++ b/pandas/io/sas/sas_xport.py
@@ -337,16 +337,16 @@ def _read_header(self):
obs_length = 0
while len(fielddata) >= fieldnamelength:
# pull data for one field
- field, fielddata = (
+ fieldbytes, fielddata = (
fielddata[:fieldnamelength],
fielddata[fieldnamelength:],
)
# rest at end gets ignored, so if field is short, pad out
# to match struct pattern below
- field = field.ljust(140)
+ fieldbytes = fieldbytes.ljust(140)
- fieldstruct = struct.unpack(">hhhh8s40s8shhh2s8shhl52s", field)
+ fieldstruct = struct.unpack(">hhhh8s40s8shhh2s8shhl52s", fieldbytes)
field = dict(zip(_fieldkeys, fieldstruct))
del field["_"]
field["ntype"] = types[field["ntype"]]
@@ -408,8 +408,8 @@ def _record_count(self) -> int:
return total_records_length // self.record_length
self.filepath_or_buffer.seek(-80, 2)
- last_card = self.filepath_or_buffer.read(80)
- last_card = np.frombuffer(last_card, dtype=np.uint64)
+ last_card_bytes = self.filepath_or_buffer.read(80)
+ last_card = np.frombuffer(last_card_bytes, dtype=np.uint64)
# 8 byte blank
ix = np.flatnonzero(last_card == 2314885530818453536)
@@ -483,7 +483,7 @@ def read(self, nrows=None):
df[x] = v
if self._index is None:
- df.index = range(self._lines_read, self._lines_read + read_lines)
+ df.index = pd.Index(range(self._lines_read, self._lines_read + read_lines))
else:
df = df.set_index(self._index)
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index d36bd42e7da8d..55dde374048b6 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -16,7 +16,18 @@
from pathlib import Path
import struct
import sys
-from typing import Any, AnyStr, BinaryIO, Dict, List, Optional, Sequence, Tuple, Union
+from typing import (
+ Any,
+ AnyStr,
+ BinaryIO,
+ Dict,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+ cast,
+)
import warnings
from dateutil.relativedelta import relativedelta
@@ -1389,6 +1400,7 @@ def _setup_dtype(self) -> np.dtype:
dtypes = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
+ typ = cast(str, typ) # only strs in NUMPY_TYPE_MAP
dtypes.append(("s" + str(i), self.byteorder + self.NUMPY_TYPE_MAP[typ]))
else:
dtypes.append(("s" + str(i), "S" + str(typ)))
@@ -1699,6 +1711,7 @@ def _do_convert_missing(self, data: DataFrame, convert_missing: bool) -> DataFra
if fmt not in self.VALID_RANGE:
continue
+ fmt = cast(str, fmt) # only strs in VALID_RANGE
nmin, nmax = self.VALID_RANGE[fmt]
series = data[colname]
missing = np.logical_or(series < nmin, series > nmax)
diff --git a/setup.cfg b/setup.cfg
index cd20249728062..836b3460f3896 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -235,21 +235,12 @@ check_untyped_defs=False
[mypy-pandas.io.formats.format]
check_untyped_defs=False
-[mypy-pandas.io.formats.style]
-check_untyped_defs=False
-
[mypy-pandas.io.parsers]
check_untyped_defs=False
[mypy-pandas.io.pytables]
check_untyped_defs=False
-[mypy-pandas.io.sas.sas_xport]
-check_untyped_defs=False
-
-[mypy-pandas.io.sas.sas7bdat]
-check_untyped_defs=False
-
[mypy-pandas.io.stata]
check_untyped_defs=False
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36990 | 2020-10-08T20:28:13Z | 2020-10-10T17:48:33Z | 2020-10-10T17:48:33Z | 2020-10-10T18:08:02Z |
TYP: generic, series, frame | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ebe5185ce4488..fd7d0190dbbcb 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -9311,8 +9311,8 @@ def _AXIS_NAMES(self) -> Dict[int, str]:
ops.add_special_arithmetic_methods(DataFrame)
-def _from_nested_dict(data):
- new_data = collections.defaultdict(dict)
+def _from_nested_dict(data) -> collections.defaultdict:
+ new_data: collections.defaultdict = collections.defaultdict(dict)
for index, s in data.items():
for col, v in s.items():
new_data[col][index] = v
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 338b45b5503dc..8cc6ca6630099 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -112,6 +112,7 @@
if TYPE_CHECKING:
from pandas._libs.tslibs import BaseOffset
+ from pandas.core.frame import DataFrame
from pandas.core.resample import Resampler
from pandas.core.series import Series
from pandas.core.window.indexers import BaseIndexer
@@ -130,7 +131,7 @@
)
-def _single_replace(self, to_replace, method, inplace, limit):
+def _single_replace(self: "Series", to_replace, method, inplace, limit):
"""
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
@@ -541,6 +542,7 @@ def _get_cleaned_column_resolvers(self) -> Dict[str, ABCSeries]:
from pandas.core.computation.parsing import clean_column_name
if isinstance(self, ABCSeries):
+ self = cast("Series", self)
return {clean_column_name(self.name): self}
return {
@@ -1995,9 +1997,10 @@ def _repr_data_resource_(self):
"""
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option("display.max_rows"))
- payload = json.loads(
- data.to_json(orient="table"), object_pairs_hook=collections.OrderedDict
- )
+
+ as_json = data.to_json(orient="table")
+ as_json = cast(str, as_json)
+ payload = json.loads(as_json, object_pairs_hook=collections.OrderedDict)
return payload
# ----------------------------------------------------------------------
@@ -3113,6 +3116,7 @@ def to_latex(
if multirow is None:
multirow = config.get_option("display.latex.multirow")
+ self = cast("DataFrame", self)
formatter = DataFrameFormatter(
self,
columns=columns,
@@ -3830,7 +3834,7 @@ def _check_setitem_copy(self, stacklevel=4, t="setting", force=False):
# the copy weakref
if self._is_copy is not None and not isinstance(self._is_copy, str):
r = self._is_copy()
- if not gc.get_referents(r) or r.shape == self.shape:
+ if not gc.get_referents(r) or (r is not None and r.shape == self.shape):
self._is_copy = None
return
@@ -6684,6 +6688,7 @@ def replace(
return self.apply(
_single_replace, args=(to_replace, method, inplace, limit)
)
+ self = cast("Series", self)
return _single_replace(self, to_replace, method, inplace, limit)
if not is_dict_like(to_replace):
@@ -7265,10 +7270,13 @@ def asof(self, where, subset=None):
nulls = self.isna() if is_series else self[subset].isna().any(1)
if nulls.all():
if is_series:
+ self = cast("Series", self)
return self._constructor(np.nan, index=where, name=self.name)
elif is_list:
+ self = cast("DataFrame", self)
return self._constructor(np.nan, index=where, columns=self.columns)
else:
+ self = cast("DataFrame", self)
return self._constructor_sliced(
np.nan, index=self.columns, name=where[0]
)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index a2a6023bf4626..e615031032cf4 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1789,12 +1789,17 @@ def count(self, level=None):
"""
if level is None:
return notna(self.array).sum()
+ elif not isinstance(self.index, MultiIndex):
+ raise ValueError("Series.count level is only valid with a MultiIndex")
+
+ index = self.index
+ assert isinstance(index, MultiIndex) # for mypy
if isinstance(level, str):
- level = self.index._get_level_number(level)
+ level = index._get_level_number(level)
- lev = self.index.levels[level]
- level_codes = np.array(self.index.codes[level], subok=False, copy=True)
+ lev = index.levels[level]
+ level_codes = np.array(index.codes[level], subok=False, copy=True)
mask = level_codes == -1
if mask.any():
diff --git a/pandas/tests/series/methods/test_count.py b/pandas/tests/series/methods/test_count.py
index 1ca48eeb7c441..19290b6a5c23f 100644
--- a/pandas/tests/series/methods/test_count.py
+++ b/pandas/tests/series/methods/test_count.py
@@ -1,4 +1,5 @@
import numpy as np
+import pytest
import pandas as pd
from pandas import Categorical, MultiIndex, Series
@@ -6,6 +7,13 @@
class TestSeriesCount:
+ def test_count_level_without_multiindex(self):
+ ser = pd.Series(range(3))
+
+ msg = "Series.count level is only valid with a MultiIndex"
+ with pytest.raises(ValueError, match=msg):
+ ser.count(level=1)
+
def test_count(self, datetime_series):
assert datetime_series.count() == len(datetime_series)
diff --git a/setup.cfg b/setup.cfg
index cd20249728062..b8631b55638df 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -208,9 +208,6 @@ check_untyped_defs=False
[mypy-pandas.core.reshape.merge]
check_untyped_defs=False
-[mypy-pandas.core.series]
-check_untyped_defs=False
-
[mypy-pandas.core.window.common]
check_untyped_defs=False
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
cc @simonjayhawkins
| https://api.github.com/repos/pandas-dev/pandas/pulls/36989 | 2020-10-08T19:16:38Z | 2020-10-10T16:23:27Z | 2020-10-10T16:23:27Z | 2020-10-10T16:24:50Z |
Fix test_unstack | diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py
index 3774e018a8e51..7be50c5f8c305 100644
--- a/pandas/tests/extension/base/reshaping.py
+++ b/pandas/tests/extension/base/reshaping.py
@@ -316,7 +316,9 @@ def test_unstack(self, data, index, obj):
alt = df.unstack(level=level).droplevel(0, axis=1)
self.assert_frame_equal(result, alt)
- expected = ser.astype(object).unstack(level=level)
+ expected = ser.astype(object).unstack(
+ level=level, fill_value=data.dtype.na_value
+ )
result = result.astype(object)
self.assert_frame_equal(result, expected)
| The test was failing if the ExtensionDtype had an na_value that wasn't equivalent to nan
- [x] closes #36986
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36987 | 2020-10-08T18:58:09Z | 2020-10-08T22:01:44Z | 2020-10-08T22:01:44Z | 2020-10-09T15:46:31Z |
CLN: move maybe_casted_values from pandas/core/frame.py to pandas/core/dtype/cast.py | diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 48391ab7d9373..4b746a4ee9b21 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -73,7 +73,12 @@
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
-from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, notna
+from pandas.core.dtypes.missing import (
+ is_valid_nat_for_dtype,
+ isna,
+ na_value_for_dtype,
+ notna,
+)
if TYPE_CHECKING:
from pandas import Series
@@ -439,6 +444,65 @@ def changeit():
return result, False
+def maybe_casted_values(index, codes=None):
+ """
+ Convert an index, given directly or as a pair (level, code), to a 1D array.
+
+ Parameters
+ ----------
+ index : Index
+ codes : sequence of integers (optional)
+
+ Returns
+ -------
+ ExtensionArray or ndarray
+ If codes is `None`, the values of `index`.
+ If codes is passed, an array obtained by taking from `index` the indices
+ contained in `codes`.
+ """
+
+ values = index._values
+ if not isinstance(index, (ABCPeriodIndex, ABCDatetimeIndex)):
+ if values.dtype == np.object_:
+ values = lib.maybe_convert_objects(values)
+
+ # if we have the codes, extract the values with a mask
+ if codes is not None:
+ mask = codes == -1
+
+ # we can have situations where the whole mask is -1,
+ # meaning there is nothing found in codes, so make all nan's
+ if mask.size > 0 and mask.all():
+ dtype = index.dtype
+ fill_value = na_value_for_dtype(dtype)
+ values = construct_1d_arraylike_from_scalar(fill_value, len(mask), dtype)
+ else:
+ values = values.take(codes)
+
+ # TODO(https://github.com/pandas-dev/pandas/issues/24206)
+ # Push this into maybe_upcast_putmask?
+ # We can't pass EAs there right now. Looks a bit
+ # complicated.
+ # So we unbox the ndarray_values, op, re-box.
+ values_type = type(values)
+ values_dtype = values.dtype
+
+ from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
+
+ if isinstance(values, DatetimeLikeArrayMixin):
+ values = values._data # TODO: can we de-kludge yet?
+
+ if mask.any():
+ values, _ = maybe_upcast_putmask(values, mask, np.nan)
+
+ if issubclass(values_type, DatetimeLikeArrayMixin):
+ values = values_type(
+ values, dtype=values_dtype
+ ) # type: ignore[call-arg]
+
+ return values
+
+
def maybe_promote(dtype, fill_value=np.nan):
"""
Find the minimal dtype that can hold both the given dtype and fill_value.
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 8a330e3d595cf..4783e8c3ce427 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -83,11 +83,11 @@
infer_dtype_from_scalar,
invalidate_string_dtypes,
maybe_cast_to_datetime,
+ maybe_casted_values,
maybe_convert_platform,
maybe_downcast_to_dtype,
maybe_infer_to_datetimelike,
maybe_upcast,
- maybe_upcast_putmask,
validate_numeric_casting,
)
from pandas.core.dtypes.common import (
@@ -114,7 +114,7 @@
needs_i8_conversion,
pandas_dtype,
)
-from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna
+from pandas.core.dtypes.missing import isna, notna
from pandas.core import algorithms, common as com, nanops, ops
from pandas.core.accessor import CachedAccessor
@@ -125,15 +125,12 @@
transform,
)
from pandas.core.arrays import Categorical, ExtensionArray
-from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin as DatetimeLikeArray
from pandas.core.arrays.sparse import SparseFrameAccessor
from pandas.core.construction import extract_array
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import Index, ensure_index, ensure_index_from_sequences
-from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.multi import MultiIndex, maybe_droplevels
-from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.core.internals import BlockManager
from pandas.core.internals.construction import (
@@ -4847,46 +4844,6 @@ class max type
else:
new_obj = self.copy()
- def _maybe_casted_values(index, labels=None):
- values = index._values
- if not isinstance(index, (PeriodIndex, DatetimeIndex)):
- if values.dtype == np.object_:
- values = lib.maybe_convert_objects(values)
-
- # if we have the labels, extract the values with a mask
- if labels is not None:
- mask = labels == -1
-
- # we can have situations where the whole mask is -1,
- # meaning there is nothing found in labels, so make all nan's
- if mask.size > 0 and mask.all():
- dtype = index.dtype
- fill_value = na_value_for_dtype(dtype)
- values = construct_1d_arraylike_from_scalar(
- fill_value, len(mask), dtype
- )
- else:
- values = values.take(labels)
-
- # TODO(https://github.com/pandas-dev/pandas/issues/24206)
- # Push this into maybe_upcast_putmask?
- # We can't pass EAs there right now. Looks a bit
- # complicated.
- # So we unbox the ndarray_values, op, re-box.
- values_type = type(values)
- values_dtype = values.dtype
-
- if issubclass(values_type, DatetimeLikeArray):
- values = values._data # TODO: can we de-kludge yet?
-
- if mask.any():
- values, _ = maybe_upcast_putmask(values, mask, np.nan)
-
- if issubclass(values_type, DatetimeLikeArray):
- values = values_type(values, dtype=values_dtype)
-
- return values
-
new_index = ibase.default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
@@ -4929,7 +4886,7 @@ def _maybe_casted_values(index, labels=None):
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
- level_values = _maybe_casted_values(lev, lab)
+ level_values = maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
| - [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
xref #36876, #27370 (stale PR) | https://api.github.com/repos/pandas-dev/pandas/pulls/36985 | 2020-10-08T18:49:26Z | 2020-10-10T22:33:41Z | 2020-10-10T22:33:41Z | 2020-10-11T04:01:00Z |
FIX: fix cleanup warnings for errorbar timeseries | diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index bdb86d2dd846f..b51ce375a7ed7 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -2658,67 +2658,84 @@ def test_pie_df_nan(self):
@pytest.mark.slow
def test_errorbar_plot(self):
- with warnings.catch_warnings():
- d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
- df = DataFrame(d)
- d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}
- df_err = DataFrame(d_err)
+ d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
+ df = DataFrame(d)
+ d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}
+ df_err = DataFrame(d_err)
- # check line plots
- ax = _check_plot_works(df.plot, yerr=df_err, logy=True)
- self._check_has_errorbars(ax, xerr=0, yerr=2)
- ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)
- self._check_has_errorbars(ax, xerr=0, yerr=2)
- ax = _check_plot_works(df.plot, yerr=df_err, loglog=True)
- self._check_has_errorbars(ax, xerr=0, yerr=2)
+ # check line plots
+ ax = _check_plot_works(df.plot, yerr=df_err, logy=True)
+ self._check_has_errorbars(ax, xerr=0, yerr=2)
- kinds = ["line", "bar", "barh"]
- for kind in kinds:
- ax = _check_plot_works(df.plot, yerr=df_err["x"], kind=kind)
- self._check_has_errorbars(ax, xerr=0, yerr=2)
- ax = _check_plot_works(df.plot, yerr=d_err, kind=kind)
- self._check_has_errorbars(ax, xerr=0, yerr=2)
- ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err, kind=kind)
- self._check_has_errorbars(ax, xerr=2, yerr=2)
- ax = _check_plot_works(
- df.plot, yerr=df_err["x"], xerr=df_err["x"], kind=kind
- )
- self._check_has_errorbars(ax, xerr=2, yerr=2)
- ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)
- self._check_has_errorbars(ax, xerr=2, yerr=2)
-
- # _check_plot_works adds an ax so catch warning. see GH #13188
- axes = _check_plot_works(
- df.plot, yerr=df_err, xerr=df_err, subplots=True, kind=kind
- )
- self._check_has_errorbars(axes, xerr=1, yerr=1)
-
- ax = _check_plot_works(
- (df + 1).plot, yerr=df_err, xerr=df_err, kind="bar", log=True
- )
- self._check_has_errorbars(ax, xerr=2, yerr=2)
+ ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)
+ self._check_has_errorbars(ax, xerr=0, yerr=2)
- # yerr is raw error values
- ax = _check_plot_works(df["y"].plot, yerr=np.ones(12) * 0.4)
- self._check_has_errorbars(ax, xerr=0, yerr=1)
- ax = _check_plot_works(df.plot, yerr=np.ones((2, 12)) * 0.4)
+ ax = _check_plot_works(df.plot, yerr=df_err, loglog=True)
+ self._check_has_errorbars(ax, xerr=0, yerr=2)
+
+ ax = _check_plot_works(
+ (df + 1).plot, yerr=df_err, xerr=df_err, kind="bar", log=True
+ )
+ self._check_has_errorbars(ax, xerr=2, yerr=2)
+
+ # yerr is raw error values
+ ax = _check_plot_works(df["y"].plot, yerr=np.ones(12) * 0.4)
+ self._check_has_errorbars(ax, xerr=0, yerr=1)
+
+ ax = _check_plot_works(df.plot, yerr=np.ones((2, 12)) * 0.4)
+ self._check_has_errorbars(ax, xerr=0, yerr=2)
+
+ # yerr is column name
+ for yerr in ["yerr", "誤差"]:
+ s_df = df.copy()
+ s_df[yerr] = np.ones(12) * 0.2
+
+ ax = _check_plot_works(s_df.plot, yerr=yerr)
self._check_has_errorbars(ax, xerr=0, yerr=2)
- # yerr is column name
- for yerr in ["yerr", "誤差"]:
- s_df = df.copy()
- s_df[yerr] = np.ones(12) * 0.2
- ax = _check_plot_works(s_df.plot, yerr=yerr)
- self._check_has_errorbars(ax, xerr=0, yerr=2)
- ax = _check_plot_works(s_df.plot, y="y", x="x", yerr=yerr)
- self._check_has_errorbars(ax, xerr=0, yerr=1)
+ ax = _check_plot_works(s_df.plot, y="y", x="x", yerr=yerr)
+ self._check_has_errorbars(ax, xerr=0, yerr=1)
- with pytest.raises(ValueError):
- df.plot(yerr=np.random.randn(11))
+ with pytest.raises(ValueError):
+ df.plot(yerr=np.random.randn(11))
- df_err = DataFrame({"x": ["zzz"] * 12, "y": ["zzz"] * 12})
- with pytest.raises((ValueError, TypeError)):
- df.plot(yerr=df_err)
+ df_err = DataFrame({"x": ["zzz"] * 12, "y": ["zzz"] * 12})
+ with pytest.raises((ValueError, TypeError)):
+ df.plot(yerr=df_err)
+
+ @pytest.mark.slow
+ @pytest.mark.parametrize("kind", ["line", "bar", "barh"])
+ def test_errorbar_plot_different_kinds(self, kind):
+ d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
+ df = DataFrame(d)
+ d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}
+ df_err = DataFrame(d_err)
+
+ ax = _check_plot_works(df.plot, yerr=df_err["x"], kind=kind)
+ self._check_has_errorbars(ax, xerr=0, yerr=2)
+
+ ax = _check_plot_works(df.plot, yerr=d_err, kind=kind)
+ self._check_has_errorbars(ax, xerr=0, yerr=2)
+
+ ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err, kind=kind)
+ self._check_has_errorbars(ax, xerr=2, yerr=2)
+
+ ax = _check_plot_works(df.plot, yerr=df_err["x"], xerr=df_err["x"], kind=kind)
+ self._check_has_errorbars(ax, xerr=2, yerr=2)
+
+ ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)
+ self._check_has_errorbars(ax, xerr=2, yerr=2)
+
+ with tm.assert_produces_warning(UserWarning):
+ # _check_plot_works creates subplots inside,
+ # which leads to warnings like this:
+ # UserWarning: To output multiple subplots,
+ # the figure containing the passed axes is being cleared
+ # Similar warnings were observed in GH #13188
+ axes = _check_plot_works(
+ df.plot, yerr=df_err, xerr=df_err, subplots=True, kind=kind
+ )
+ self._check_has_errorbars(axes, xerr=1, yerr=1)
@pytest.mark.xfail(reason="Iterator is consumed", raises=ValueError)
@pytest.mark.slow
@@ -2765,35 +2782,39 @@ def test_errorbar_with_partial_columns(self):
self._check_has_errorbars(ax, xerr=0, yerr=1)
@pytest.mark.slow
- def test_errorbar_timeseries(self):
+ @pytest.mark.parametrize("kind", ["line", "bar", "barh"])
+ def test_errorbar_timeseries(self, kind):
+ d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
+ d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}
- with warnings.catch_warnings():
- d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
- d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}
+ # check time-series plots
+ ix = date_range("1/1/2000", "1/1/2001", freq="M")
+ tdf = DataFrame(d, index=ix)
+ tdf_err = DataFrame(d_err, index=ix)
- # check time-series plots
- ix = date_range("1/1/2000", "1/1/2001", freq="M")
- tdf = DataFrame(d, index=ix)
- tdf_err = DataFrame(d_err, index=ix)
+ ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
+ self._check_has_errorbars(ax, xerr=0, yerr=2)
- kinds = ["line", "bar", "barh"]
- for kind in kinds:
- ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
- self._check_has_errorbars(ax, xerr=0, yerr=2)
- ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)
- self._check_has_errorbars(ax, xerr=0, yerr=2)
- ax = _check_plot_works(tdf.plot, y="y", yerr=tdf_err["x"], kind=kind)
- self._check_has_errorbars(ax, xerr=0, yerr=1)
- ax = _check_plot_works(tdf.plot, y="y", yerr="x", kind=kind)
- self._check_has_errorbars(ax, xerr=0, yerr=1)
- ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
- self._check_has_errorbars(ax, xerr=0, yerr=2)
-
- # _check_plot_works adds an ax so catch warning. see GH #13188
- axes = _check_plot_works(
- tdf.plot, kind=kind, yerr=tdf_err, subplots=True
- )
- self._check_has_errorbars(axes, xerr=0, yerr=1)
+ ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)
+ self._check_has_errorbars(ax, xerr=0, yerr=2)
+
+ ax = _check_plot_works(tdf.plot, y="y", yerr=tdf_err["x"], kind=kind)
+ self._check_has_errorbars(ax, xerr=0, yerr=1)
+
+ ax = _check_plot_works(tdf.plot, y="y", yerr="x", kind=kind)
+ self._check_has_errorbars(ax, xerr=0, yerr=1)
+
+ ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
+ self._check_has_errorbars(ax, xerr=0, yerr=2)
+
+ with tm.assert_produces_warning(UserWarning):
+ # _check_plot_works creates subplots inside,
+ # which leads to warnings like this:
+ # UserWarning: To output multiple subplots,
+ # the figure containing the passed axes is being cleared
+ # Similar warnings were observed in GH #13188
+ axes = _check_plot_works(tdf.plot, kind=kind, yerr=tdf_err, subplots=True)
+ self._check_has_errorbars(axes, xerr=0, yerr=1)
def test_errorbar_asymmetrical(self):
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Currently in ``pandas/tests/plotting/tests_frame.py`` there are several warnings emitted.
```
pandas/tests/plotting/test_frame.py::TestDataFramePlots::test_mpl2_color_cycle_str
/workspaces/pandas/pandas/plotting/_matplotlib/style.py:64: MatplotlibDeprecationWarning: Support for uppercase single-letter colors is deprecated since Matplotlib 3.1 and will be removed in 3.3; please use lowercase instead.
[conv.to_rgba(c) for c in colors]
pandas/tests/plotting/test_frame.py::TestDataFramePlots::test_errorbar_plot
pandas/tests/plotting/test_frame.py::TestDataFramePlots::test_errorbar_plot
pandas/tests/plotting/test_frame.py::TestDataFramePlots::test_errorbar_plot
pandas/tests/plotting/test_frame.py::TestDataFramePlots::test_errorbar_timeseries
pandas/tests/plotting/test_frame.py::TestDataFramePlots::test_errorbar_timeseries
pandas/tests/plotting/test_frame.py::TestDataFramePlots::test_errorbar_timeseries
/workspaces/pandas/pandas/plotting/_matplotlib/__init__.py:61: UserWarning: To output multiple subplots, the figure containing the passed axes is being cleared
plot_obj.generate()
-- Docs: https://docs.pytest.org/en/stable/warnings.html
```
This PR handles warnings in ``test_errorbar_timeseries``.
If this approach is considered reasonable by the reviewers, then I will do the same for ``test_errorbar_plot`` as well as other similar warnings among the plotting-related tests. | https://api.github.com/repos/pandas-dev/pandas/pulls/36982 | 2020-10-08T14:52:03Z | 2020-10-10T17:33:48Z | 2020-10-10T17:33:48Z | 2020-10-10T18:11:39Z |
BUG: fix matplotlib warning on CN color | diff --git a/pandas/plotting/_matplotlib/style.py b/pandas/plotting/_matplotlib/style.py
index 3e0954ef3d74d..b919728971505 100644
--- a/pandas/plotting/_matplotlib/style.py
+++ b/pandas/plotting/_matplotlib/style.py
@@ -56,29 +56,9 @@ def random_color(column):
else:
raise ValueError("color_type must be either 'default' or 'random'")
- if isinstance(colors, str):
- conv = matplotlib.colors.ColorConverter()
-
- def _maybe_valid_colors(colors):
- try:
- [conv.to_rgba(c) for c in colors]
- return True
- except ValueError:
- return False
-
- # check whether the string can be convertible to single color
- maybe_single_color = _maybe_valid_colors([colors])
- # check whether each character can be convertible to colors
- maybe_color_cycle = _maybe_valid_colors(list(colors))
- if maybe_single_color and maybe_color_cycle and len(colors) > 1:
- hex_color = [c["color"] for c in list(plt.rcParams["axes.prop_cycle"])]
- colors = [hex_color[int(colors[1])]]
- elif maybe_single_color:
- colors = [colors]
- else:
- # ``colors`` is regarded as color cycle.
- # mpl will raise error any of them is invalid
- pass
+ if isinstance(colors, str) and _is_single_color(colors):
+ # GH #36972
+ colors = [colors]
# Append more colors by cycling if there is not enough color.
# Extra colors will be ignored by matplotlib if there are more colors
@@ -94,3 +74,33 @@ def _maybe_valid_colors(colors):
colors += colors[:mod]
return colors
+
+
+def _is_single_color(color: str) -> bool:
+ """Check if ``color`` is a single color.
+
+ Examples of single colors:
+ - 'r'
+ - 'g'
+ - 'red'
+ - 'green'
+ - 'C3'
+
+ Parameters
+ ----------
+ color : string
+ Color string.
+
+ Returns
+ -------
+ bool
+ True if ``color`` looks like a valid color.
+ False otherwise.
+ """
+ conv = matplotlib.colors.ColorConverter()
+ try:
+ conv.to_rgba(color)
+ except ValueError:
+ return False
+ else:
+ return True
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py
index bdb86d2dd846f..74fbbf13e9597 100644
--- a/pandas/tests/plotting/test_frame.py
+++ b/pandas/tests/plotting/test_frame.py
@@ -170,10 +170,21 @@ def test_integer_array_plot(self):
def test_mpl2_color_cycle_str(self):
# GH 15516
- colors = ["C" + str(x) for x in range(10)]
df = DataFrame(randn(10, 3), columns=["a", "b", "c"])
- for c in colors:
- _check_plot_works(df.plot, color=c)
+ colors = ["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9"]
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always", "MatplotlibDeprecationWarning")
+
+ for color in colors:
+ _check_plot_works(df.plot, color=color)
+
+ # if warning is raised, check that it is the exact problematic one
+ # GH 36972
+ if w:
+ match = "Support for uppercase single-letter colors is deprecated"
+ warning_message = str(w[0].message)
+ msg = "MatplotlibDeprecationWarning related to CN colors was raised"
+ assert match not in warning_message, msg
def test_color_single_series_list(self):
# GH 3486
| - [ ] closes #36972
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/36981 | 2020-10-08T12:42:03Z | 2020-10-10T22:33:16Z | 2020-10-10T22:33:15Z | 2020-10-20T11:37:15Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.