title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
CLN: remove redundant code in IndexOpsMixin.item | diff --git a/pandas/core/base.py b/pandas/core/base.py
index 813de491ffdb3..b62ef668df5e1 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -22,7 +22,6 @@
is_list_like,
is_object_dtype,
is_scalar,
- needs_i8_conversion,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna
@@ -656,13 +655,6 @@ def item(self):
ValueError
If the data is not length-1.
"""
- if not (
- is_extension_array_dtype(self.dtype) or needs_i8_conversion(self.dtype)
- ):
- # numpy returns ints instead of datetime64/timedelta64 objects,
- # which we need to wrap in Timestamp/Timedelta/Period regardless.
- return self._values.item()
-
if len(self) == 1:
return next(iter(self))
raise ValueError("can only convert an array of size 1 to a Python scalar")
| the code removed in this PR was added (with tests) in #30175.
in a latter PR, #31506, IndexOpsMixin.\_\_iter__ was modified, making the special case in IndexOpsMixin.item redundant.
cc @jbrockmendel | https://api.github.com/repos/pandas-dev/pandas/pulls/35008 | 2020-06-26T10:00:30Z | 2020-06-26T12:34:10Z | 2020-06-26T12:34:10Z | 2020-06-26T12:40:48Z |
CI: lint failure on master | diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 102c457f94a95..9c223d66b727b 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -6,7 +6,6 @@
from pandas._config import option_context
-from pandas._libs import reduction as libreduction
from pandas._typing import Axis
from pandas.util._decorators import cache_readonly
| https://api.github.com/repos/pandas-dev/pandas/pulls/35007 | 2020-06-26T09:23:04Z | 2020-06-26T12:26:43Z | 2020-06-26T12:26:42Z | 2020-06-26T12:40:24Z | |
BUG: DataFrame.melt gives unexpected result when column "value" already exists | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index c5eb2febe8ae9..009e8ec3eb52b 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -816,6 +816,7 @@ Deprecations
- :meth:`util.testing.assert_almost_equal` now accepts both relative and absolute
precision through the ``rtol``, and ``atol`` parameters, thus deprecating the
``check_less_precise`` parameter. (:issue:`13357`).
+- :func:`DataFrame.melt` accepting a value_name that already exists is deprecated, and will be removed in a future version (:issue:`34731`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py
index cd0619738677d..923b9e7462d8b 100644
--- a/pandas/core/reshape/melt.py
+++ b/pandas/core/reshape/melt.py
@@ -1,5 +1,6 @@
import re
from typing import TYPE_CHECKING, List, cast
+import warnings
import numpy as np
@@ -40,6 +41,16 @@ def melt(
else:
cols = list(frame.columns)
+ if value_name in frame.columns:
+ warnings.warn(
+ "This dataframe has a column name that matches the 'value_name' column "
+ "name of the resultiing Dataframe. "
+ "In the future this will raise an error, please set the 'value_name' "
+ "parameter of DataFrame.melt to a unique name.",
+ FutureWarning,
+ stacklevel=3,
+ )
+
if id_vars is not None:
if not is_list_like(id_vars):
id_vars = [id_vars]
diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py
index 000a6354277ab..a0fa10802f860 100644
--- a/pandas/tests/reshape/test_melt.py
+++ b/pandas/tests/reshape/test_melt.py
@@ -1014,3 +1014,17 @@ def test_col_substring_of_stubname(self):
)
result = pd.wide_to_long(wide_df, stubnames="PA", i=["node_id", "A"], j="time")
tm.assert_frame_equal(result, expected)
+
+ def test_warn_of_column_name_value(self):
+ # GH34731
+ # raise a warning if the resultant value column name matches
+ # a name in the dataframe already (default name is "value")
+ df = pd.DataFrame({"col": list("ABC"), "value": range(10, 16, 2)})
+ expected = pd.DataFrame(
+ [["A", "col", "A"], ["B", "col", "B"], ["C", "col", "C"]],
+ columns=["value", "variable", "value"],
+ )
+
+ with tm.assert_produces_warning(FutureWarning):
+ result = df.melt(id_vars="value")
+ tm.assert_frame_equal(result, expected)
| - [ x] closes #34731
- [x ] tests added / passed
- [ X] passes `black pandas`
- [ x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/35003 | 2020-06-26T03:30:25Z | 2020-07-02T15:26:45Z | 2020-07-02T15:26:44Z | 2022-11-02T19:57:37Z |
CLN: remove libreduction.Reducer | diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index 58de682c56d55..97c491776f831 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -1,17 +1,12 @@
from copy import copy
from cython import Py_ssize_t
-from cpython.ref cimport Py_INCREF
from libc.stdlib cimport malloc, free
import numpy as np
cimport numpy as cnp
-from numpy cimport (ndarray,
- int64_t,
- PyArray_SETITEM,
- PyArray_ITER_NEXT, PyArray_ITER_DATA, PyArray_IterNew,
- flatiter)
+from numpy cimport ndarray, int64_t
cnp.import_array()
from pandas._libs cimport util
@@ -26,146 +21,6 @@ cdef _check_result_array(object obj, Py_ssize_t cnt):
raise ValueError('Function does not reduce')
-cdef class Reducer:
- """
- Performs generic reduction operation on a C or Fortran-contiguous ndarray
- while avoiding ndarray construction overhead
- """
- cdef:
- Py_ssize_t increment, chunksize, nresults
- object dummy, f, labels, typ, ityp, index
- ndarray arr
-
- def __init__(
- self, ndarray arr, object f, int axis=1, object dummy=None, object labels=None
- ):
- cdef:
- Py_ssize_t n, k
-
- n, k = (<object>arr).shape
-
- if axis == 0:
- if not arr.flags.f_contiguous:
- arr = arr.copy('F')
-
- self.nresults = k
- self.chunksize = n
- self.increment = n * arr.dtype.itemsize
- else:
- if not arr.flags.c_contiguous:
- arr = arr.copy('C')
-
- self.nresults = n
- self.chunksize = k
- self.increment = k * arr.dtype.itemsize
-
- self.f = f
- self.arr = arr
- self.labels = labels
- self.dummy, self.typ, self.index, self.ityp = self._check_dummy(
- dummy=dummy)
-
- cdef _check_dummy(self, object dummy=None):
- cdef:
- object index = None, typ = None, ityp = None
-
- if dummy is None:
- dummy = np.empty(self.chunksize, dtype=self.arr.dtype)
-
- # our ref is stolen later since we are creating this array
- # in cython, so increment first
- Py_INCREF(dummy)
-
- else:
-
- # we passed a Series
- typ = type(dummy)
- index = dummy.index
- dummy = dummy.values
-
- if dummy.dtype != self.arr.dtype:
- raise ValueError('Dummy array must be same dtype')
- if len(dummy) != self.chunksize:
- raise ValueError(f'Dummy array must be length {self.chunksize}')
-
- return dummy, typ, index, ityp
-
- def get_result(self):
- cdef:
- char* dummy_buf
- ndarray arr, result, chunk
- Py_ssize_t i
- flatiter it
- object res, name, labels
- object cached_typ = None
-
- arr = self.arr
- chunk = self.dummy
- dummy_buf = chunk.data
- chunk.data = arr.data
- labels = self.labels
-
- result = np.empty(self.nresults, dtype='O')
- it = <flatiter>PyArray_IterNew(result)
- reduction_success = True
-
- try:
- for i in range(self.nresults):
-
- # create the cached type
- # each time just reassign the data
- if i == 0:
-
- if self.typ is not None:
- # In this case, we also have self.index
- name = labels[i]
- cached_typ = self.typ(
- chunk, index=self.index, name=name, dtype=arr.dtype)
-
- # use the cached_typ if possible
- if cached_typ is not None:
- # In this case, we also have non-None labels
- name = labels[i]
-
- object.__setattr__(
- cached_typ._mgr._block, 'values', chunk)
- object.__setattr__(cached_typ, 'name', name)
- res = self.f(cached_typ)
- else:
- res = self.f(chunk)
-
- # TODO: reason for not squeezing here?
- extracted_res = _extract_result(res, squeeze=False)
- if i == 0:
- # On the first pass, we check the output shape to see
- # if this looks like a reduction.
- # If it does not, return the computed value to be used by the
- # pure python implementation,
- # so the function won't be called twice on the same object,
- # and side effects would occur twice
- try:
- _check_result_array(extracted_res, len(self.dummy))
- except ValueError as err:
- if "Function does not reduce" not in str(err):
- # catch only the specific exception
- raise
-
- reduction_success = False
- PyArray_SETITEM(result, PyArray_ITER_DATA(it), copy(res))
- break
-
- PyArray_SETITEM(result, PyArray_ITER_DATA(it), extracted_res)
- chunk.data = chunk.data + self.increment
- PyArray_ITER_NEXT(it)
-
- finally:
- # so we don't free the wrong memory
- chunk.data = dummy_buf
-
- result = maybe_convert_objects(result)
- return result, reduction_success
-
-
cdef class _BaseGrouper:
cdef _check_dummy(self, object dummy):
# both values and index must be an ndarray!
@@ -610,30 +465,3 @@ cdef class BlockSlider:
# axis=1 is the frame's axis=0
arr.data = self.base_ptrs[i]
arr.shape[1] = 0
-
-
-def compute_reduction(arr: ndarray, f, axis: int = 0, dummy=None, labels=None):
- """
-
- Parameters
- -----------
- arr : np.ndarray
- f : function
- axis : integer axis
- dummy : type of reduced output (series)
- labels : Index or None
- """
-
- # We either have both dummy and labels, or neither of them
- if (labels is None) ^ (dummy is None):
- raise ValueError("Must pass either dummy and labels, or neither")
-
- if labels is not None:
- # Caller is responsible for ensuring we don't have MultiIndex
- assert labels.nlevels == 1
-
- # pass as an ndarray/ExtensionArray
- labels = labels._values
-
- reducer = Reducer(arr, f, axis=axis, dummy=dummy, labels=labels)
- return reducer.get_result()
diff --git a/pandas/tests/groupby/test_bin_groupby.py b/pandas/tests/groupby/test_bin_groupby.py
index 9df45f7a23f55..f20eed4575e91 100644
--- a/pandas/tests/groupby/test_bin_groupby.py
+++ b/pandas/tests/groupby/test_bin_groupby.py
@@ -6,7 +6,7 @@
from pandas.core.dtypes.common import ensure_int64
import pandas as pd
-from pandas import Index, Series, isna
+from pandas import Series, isna
import pandas._testing as tm
@@ -136,37 +136,3 @@ def _ohlc(group):
class TestMoments:
pass
-
-
-class TestReducer:
- def test_int_index(self):
- arr = np.random.randn(100, 4)
-
- msg = "Must pass either dummy and labels, or neither"
- # we must pass either both labels and dummy, or neither
- with pytest.raises(ValueError, match=msg):
- libreduction.compute_reduction(arr, np.sum, labels=Index(np.arange(4)))
-
- with pytest.raises(ValueError, match=msg):
- libreduction.compute_reduction(
- arr, np.sum, axis=1, labels=Index(np.arange(100))
- )
-
- dummy = Series(0.0, index=np.arange(100))
- result, _ = libreduction.compute_reduction(
- arr, np.sum, dummy=dummy, labels=Index(np.arange(4))
- )
- expected = arr.sum(0)
- tm.assert_almost_equal(result, expected)
-
- dummy = Series(0.0, index=np.arange(4))
- result, _ = libreduction.compute_reduction(
- arr, np.sum, axis=1, dummy=dummy, labels=Index(np.arange(100))
- )
- expected = arr.sum(1)
- tm.assert_almost_equal(result, expected)
-
- result, _ = libreduction.compute_reduction(
- arr, np.sum, axis=1, dummy=dummy, labels=Index(np.arange(100))
- )
- tm.assert_almost_equal(result, expected)
| https://api.github.com/repos/pandas-dev/pandas/pulls/35001 | 2020-06-26T00:50:21Z | 2020-06-26T20:18:30Z | 2020-06-26T20:18:30Z | 2020-06-26T20:31:18Z | |
BUG: item_cache not cleared on DataFrame.values | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 521d16ac0b905..199fd4e858b5e 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1341,6 +1341,7 @@ def to_numpy(
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
"""
+ self._consolidate_inplace()
result = self._mgr.as_array(
transpose=self._AXIS_REVERSED, dtype=dtype, copy=copy, na_value=na_value
)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 307bf84068424..692a1f96b90b1 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5330,6 +5330,7 @@ def values(self) -> np.ndarray:
['lion', 80.5, 1],
['monkey', nan, None]], dtype=object)
"""
+ self._consolidate_inplace()
return self._mgr.as_array(transpose=self._AXIS_REVERSED)
@property
@@ -6526,7 +6527,7 @@ def replace(
f"Replacement lists must match in length. "
f"Expecting {len(to_replace)} got {len(value)} "
)
-
+ self._consolidate_inplace()
new_data = self._mgr.replace_list(
src_list=to_replace,
dest_list=value,
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 6055a6205d286..843e7ce40fef8 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -812,7 +812,7 @@ def as_array(
.values.to_numpy(dtype=dtype, na_value=na_value)
.reshape(self.blocks[0].shape)
)
- elif self._is_single_block or not self.is_mixed_type:
+ elif self._is_single_block:
arr = np.asarray(self.blocks[0].get_values())
if dtype:
arr = arr.astype(dtype, copy=False)
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py
index e2910a2eb6100..d5554860c034d 100644
--- a/pandas/tests/frame/test_block_internals.py
+++ b/pandas/tests/frame/test_block_internals.py
@@ -86,9 +86,14 @@ def test_modify_values(self, float_frame):
# unconsolidated
float_frame["E"] = 7.0
+ col = float_frame["E"]
float_frame.values[6] = 6
assert (float_frame.values[6] == 6).all()
+ # check that item_cache was cleared
+ assert float_frame["E"] is not col
+ assert (col == 7).all()
+
def test_boolean_set_uncons(self, float_frame):
float_frame["E"] = 7.0
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/34999 | 2020-06-25T22:37:02Z | 2020-06-26T23:53:24Z | 2020-06-26T23:53:24Z | 2020-09-07T06:58:36Z |
API: User-control of result keys in GroupBy.apply | diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst
index 0fb59c50efa74..bc772b5dab66c 100644
--- a/doc/source/user_guide/groupby.rst
+++ b/doc/source/user_guide/groupby.rst
@@ -1052,7 +1052,14 @@ Some operations on the grouped data might not fit into either the aggregate or
transform categories. Or, you may simply want GroupBy to infer how to combine
the results. For these, use the ``apply`` function, which can be substituted
for both ``aggregate`` and ``transform`` in many standard use cases. However,
-``apply`` can handle some exceptional use cases, for example:
+``apply`` can handle some exceptional use cases.
+
+.. note::
+
+ ``apply`` can act as a reducer, transformer, *or* filter function, depending
+ on exactly what is passed to it. It can depend on the passed function and
+ exactly what you are grouping. Thus the grouped column(s) may be included in
+ the output as well as set the indices.
.. ipython:: python
@@ -1064,16 +1071,14 @@ for both ``aggregate`` and ``transform`` in many standard use cases. However,
The dimension of the returned result can also change:
-.. ipython::
-
- In [8]: grouped = df.groupby('A')['C']
+.. ipython:: python
- In [10]: def f(group):
- ....: return pd.DataFrame({'original': group,
- ....: 'demeaned': group - group.mean()})
- ....:
+ grouped = df.groupby('A')['C']
- In [11]: grouped.apply(f)
+ def f(group):
+ return pd.DataFrame({'original': group,
+ 'demeaned': group - group.mean()})
+ grouped.apply(f)
``apply`` on a Series can operate on a returned value from the applied function,
that is itself a series, and possibly upcast the result to a DataFrame:
@@ -1088,11 +1093,33 @@ that is itself a series, and possibly upcast the result to a DataFrame:
s
s.apply(f)
+Control grouped column(s) placement with ``group_keys``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
.. note::
- ``apply`` can act as a reducer, transformer, *or* filter function, depending on exactly what is passed to it.
- So depending on the path taken, and exactly what you are grouping. Thus the grouped columns(s) may be included in
- the output as well as set the indices.
+ If ``group_keys=True`` is specified when calling :meth:`~DataFrame.groupby`,
+ functions passed to ``apply`` that return like-indexed outputs will have the
+ group keys added to the result index. Previous versions of pandas would add
+ the group keys only when the result from the applied function had a different
+ index than the input. If ``group_keys`` is not specified, the group keys will
+ not be added for like-indexed outputs. In the future this behavior
+ will change to always respect ``group_keys``, which defaults to ``True``.
+
+ .. versionchanged:: 1.5.0
+
+To control whether the grouped column(s) are included in the indices, you can use
+the argument ``group_keys``. Compare
+
+.. ipython:: python
+
+ df.groupby("A", group_keys=True).apply(lambda x: x)
+
+with
+
+.. ipython:: python
+
+ df.groupby("A", group_keys=False).apply(lambda x: x)
Similar to :ref:`groupby.aggregate.udfs`, the resulting dtype will reflect that of the
apply function. If the results from different groups have different dtypes, then
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 9cbfa49cc8c5c..e89e2f878fc24 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -342,10 +342,15 @@ Now every group is evaluated only a single time.
*New behavior*:
-.. ipython:: python
-
- df.groupby("a").apply(func)
+.. code-block:: python
+ In [3]: df.groupby('a').apply(func)
+ x
+ y
+ Out[3]:
+ a b
+ 0 x 1
+ 1 y 2
Concatenating sparse values
^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst
index 7340f2475e1f6..87982a149054c 100644
--- a/doc/source/whatsnew/v1.4.0.rst
+++ b/doc/source/whatsnew/v1.4.0.rst
@@ -455,10 +455,20 @@ result's index is not the same as the input's.
*New behavior*:
-.. ipython:: python
+.. code-block:: ipython
- df.groupby(['a']).apply(func)
- df.set_index(['a', 'b']).groupby(['a']).apply(func)
+ In [5]: df.groupby(['a']).apply(func)
+ Out[5]:
+ a b c
+ 0 1 3 5
+ 1 2 4 6
+
+ In [6]: df.set_index(['a', 'b']).groupby(['a']).apply(func)
+ Out[6]:
+ c
+ a b
+ 1 3 5
+ 2 4 6
Now in both cases it is determined that ``func`` is a transform. In each case,
the result has the same index as the input.
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst
index 8c02785647861..632d2e9f5b87f 100644
--- a/doc/source/whatsnew/v1.5.0.rst
+++ b/doc/source/whatsnew/v1.5.0.rst
@@ -24,10 +24,56 @@ Styler
- Added a new method :meth:`.Styler.concat` which allows adding customised footer rows to visualise additional calculations on the data, e.g. totals and counts etc. (:issue:`43875`, :issue:`46186`)
- :meth:`.Styler.highlight_null` now accepts ``color`` consistently with other builtin methods and deprecates ``null_color`` although this remains backwards compatible (:issue:`45907`)
-.. _whatsnew_150.enhancements.enhancement2:
+.. _whatsnew_150.enhancements.resample_group_keys:
-enhancement2
-^^^^^^^^^^^^
+Control of index with ``group_keys`` in :meth:`DataFrame.resample`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The argument ``group_keys`` has been added to the method :meth:`DataFrame.resample`.
+As with :meth:`DataFrame.groupby`, this argument controls the whether each group is added
+to the index in the resample when :meth:`.Resampler.apply` is used.
+
+.. warning::
+ Not specifying the ``group_keys`` argument will retain the
+ previous behavior and emit a warning if the result will change
+ by specifying ``group_keys=False``. In a future version
+ of pandas, not specifying ``group_keys`` will default to
+ the same behavior as ``group_keys=False``.
+
+.. ipython:: python
+
+ df = pd.DataFrame(
+ {'a': range(6)},
+ index=pd.date_range("2021-01-01", periods=6, freq="8H")
+ )
+ df.resample("D", group_keys=True).apply(lambda x: x)
+ df.resample("D", group_keys=False).apply(lambda x: x)
+
+Previously, the resulting index would depend upon the values returned by ``apply``,
+as seen in the following example.
+
+.. code-block:: ipython
+
+ In [1]: # pandas 1.3
+ In [2]: df.resample("D").apply(lambda x: x)
+ Out[2]:
+ a
+ 2021-01-01 00:00:00 0
+ 2021-01-01 08:00:00 1
+ 2021-01-01 16:00:00 2
+ 2021-01-02 00:00:00 3
+ 2021-01-02 08:00:00 4
+ 2021-01-02 16:00:00 5
+
+ In [3]: df.resample("D").apply(lambda x: x.reset_index())
+ Out[3]:
+ index a
+ 2021-01-01 0 2021-01-01 00:00:00 0
+ 1 2021-01-01 08:00:00 1
+ 2 2021-01-01 16:00:00 2
+ 2021-01-02 0 2021-01-02 00:00:00 3
+ 1 2021-01-02 08:00:00 4
+ 2 2021-01-02 16:00:00 5
.. _whatsnew_150.enhancements.other:
@@ -345,6 +391,23 @@ that their usage is considered unsafe, and can lead to unexpected results.
See the documentation of :class:`ExcelWriter` for further details.
+.. _whatsnew_150.deprecations.group_keys_in_apply:
+
+Using ``group_keys`` with transformers in :meth:`.GroupBy.apply`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In previous versions of pandas, if it was inferred that the function passed to
+:meth:`.GroupBy.apply` was a transformer (i.e. the resulting index was equal to
+the input index), the ``group_keys`` argument of :meth:`DataFrame.groupby` and
+:meth:`Series.groupby` was ignored and the group keys would never be added to
+the index of the result. In the future, the group keys will be added to the index
+when the user specifies ``group_keys=True``.
+
+As ``group_keys=True`` is the default value of :meth:`DataFrame.groupby` and
+:meth:`Series.groupby`, not specifying ``group_keys`` with a transformer will
+raise a ``FutureWarning``. This can be silenced and the previous behavior
+retained by specifying ``group_keys=False``.
+
.. _whatsnew_150.deprecations.other:
Other Deprecations
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 0013ddf73cddc..1a7cf9fae8db4 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7864,6 +7864,27 @@ def update(
a 13.0 13.0
b 12.3 123.0
NaN 12.3 33.0
+
+When using ``.apply()``, use ``group_keys`` to include or exclude the group keys.
+The ``group_keys`` argument defaults to ``True`` (include).
+
+>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
+... 'Parrot', 'Parrot'],
+... 'Max Speed': [380., 370., 24., 26.]})
+>>> df.groupby("Animal", group_keys=True).apply(lambda x: x)
+ Animal Max Speed
+Animal
+Falcon 0 Falcon 380.0
+ 1 Falcon 370.0
+Parrot 2 Parrot 24.0
+ 3 Parrot 26.0
+
+>>> df.groupby("Animal", group_keys=False).apply(lambda x: x)
+ Animal Max Speed
+0 Falcon 380.0
+1 Falcon 370.0
+2 Parrot 24.0
+3 Parrot 26.0
"""
)
@Appender(_shared_docs["groupby"] % _shared_doc_kwargs)
@@ -7874,7 +7895,7 @@ def groupby(
level: Level | None = None,
as_index: bool = True,
sort: bool = True,
- group_keys: bool = True,
+ group_keys: bool | lib.NoDefault = no_default,
squeeze: bool | lib.NoDefault = no_default,
observed: bool = False,
dropna: bool = True,
@@ -10819,6 +10840,7 @@ def resample(
level=None,
origin: str | TimestampConvertibleTypes = "start_day",
offset: TimedeltaConvertibleTypes | None = None,
+ group_keys: bool | lib.NoDefault = no_default,
) -> Resampler:
return super().resample(
rule=rule,
@@ -10833,6 +10855,7 @@ def resample(
level=level,
origin=origin,
offset=offset,
+ group_keys=group_keys,
)
def to_timestamp(
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 98e0ab43f2a09..700a8f6a39f8d 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -8041,6 +8041,7 @@ def resample(
level=None,
origin: str | TimestampConvertibleTypes = "start_day",
offset: TimedeltaConvertibleTypes | None = None,
+ group_keys: bool_t | lib.NoDefault = lib.no_default,
) -> Resampler:
"""
Resample time-series data.
@@ -8115,6 +8116,17 @@ def resample(
.. versionadded:: 1.1.0
+ group_keys : bool, optional
+ Whether to include the group keys in the result index when using
+ ``.apply()`` on the resampled object. Not specifying ``group_keys``
+ will retain values-dependent behavior from pandas 1.4
+ and earlier (see :ref:`pandas 1.5.0 Release notes
+ <whatsnew_150.enhancements.resample_group_keys>`
+ for examples). In a future version of pandas, the behavior will
+ default to the same as specifying ``group_keys=False``.
+
+ .. versionadded:: 1.5.0
+
Returns
-------
pandas.core.Resampler
@@ -8454,6 +8466,7 @@ def resample(
level=level,
origin=origin,
offset=offset,
+ group_keys=group_keys,
)
@final
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 0811adbeeeda0..76511cb3eb48c 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -357,6 +357,7 @@ def _wrap_applied_output(
data: Series,
values: list[Any],
not_indexed_same: bool = False,
+ override_group_keys: bool = False,
) -> DataFrame | Series:
"""
Wrap the output of SeriesGroupBy.apply into the expected result.
@@ -395,7 +396,11 @@ def _wrap_applied_output(
res_ser.name = self.obj.name
return res_ser
elif isinstance(values[0], (Series, DataFrame)):
- return self._concat_objects(values, not_indexed_same=not_indexed_same)
+ return self._concat_objects(
+ values,
+ not_indexed_same=not_indexed_same,
+ override_group_keys=override_group_keys,
+ )
else:
# GH #6265 #24880
result = self.obj._constructor(
@@ -983,7 +988,11 @@ def _aggregate_item_by_item(self, func, *args, **kwargs) -> DataFrame:
return res_df
def _wrap_applied_output(
- self, data: DataFrame, values: list, not_indexed_same: bool = False
+ self,
+ data: DataFrame,
+ values: list,
+ not_indexed_same: bool = False,
+ override_group_keys: bool = False,
):
if len(values) == 0:
@@ -1000,7 +1009,11 @@ def _wrap_applied_output(
# GH9684 - All values are None, return an empty frame.
return self.obj._constructor()
elif isinstance(first_not_none, DataFrame):
- return self._concat_objects(values, not_indexed_same=not_indexed_same)
+ return self._concat_objects(
+ values,
+ not_indexed_same=not_indexed_same,
+ override_group_keys=override_group_keys,
+ )
key_index = self.grouper.result_index if self.as_index else None
@@ -1026,7 +1039,11 @@ def _wrap_applied_output(
else:
# values are Series
return self._wrap_applied_output_series(
- values, not_indexed_same, first_not_none, key_index
+ values,
+ not_indexed_same,
+ first_not_none,
+ key_index,
+ override_group_keys,
)
def _wrap_applied_output_series(
@@ -1035,6 +1052,7 @@ def _wrap_applied_output_series(
not_indexed_same: bool,
first_not_none,
key_index,
+ override_group_keys: bool,
) -> DataFrame | Series:
# this is to silence a DeprecationWarning
# TODO(2.0): Remove when default dtype of empty Series is object
@@ -1058,7 +1076,11 @@ def _wrap_applied_output_series(
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
- return self._concat_objects(values, not_indexed_same=not_indexed_same)
+ return self._concat_objects(
+ values,
+ not_indexed_same=not_indexed_same,
+ override_group_keys=override_group_keys,
+ )
# still a series
# path added as of GH 5545
@@ -1069,7 +1091,11 @@ def _wrap_applied_output_series(
if not all_indexed_same:
# GH 8467
- return self._concat_objects(values, not_indexed_same=True)
+ return self._concat_objects(
+ values,
+ not_indexed_same=True,
+ override_group_keys=override_group_keys,
+ )
# Combine values
# vstack+constructor is faster than concat and handles MI-columns
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 41e5aa628fcc8..3089a6b8c16ae 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -584,7 +584,7 @@ class BaseGroupBy(PandasObject, SelectionMixin[NDFrameT], GroupByIndexingMixin):
axis: int
grouper: ops.BaseGrouper
- group_keys: bool
+ group_keys: bool | lib.NoDefault
@final
def __len__(self) -> int:
@@ -850,7 +850,7 @@ def __init__(
selection: IndexLabel | None = None,
as_index: bool = True,
sort: bool = True,
- group_keys: bool = True,
+ group_keys: bool | lib.NoDefault = True,
squeeze: bool = False,
observed: bool = False,
mutated: bool = False,
@@ -951,9 +951,12 @@ def curried(x):
if name in base.plotting_methods:
return self.apply(curried)
- result = self._python_apply_general(curried, self._obj_with_exclusions)
+ is_transform = name in base.transformation_kernels
+ result = self._python_apply_general(
+ curried, self._obj_with_exclusions, is_transform=is_transform
+ )
- if self.grouper.has_dropped_na and name in base.transformation_kernels:
+ if self.grouper.has_dropped_na and is_transform:
# result will have dropped rows due to nans, fill with null
# and ensure index is ordered same as the input
result = self._set_result_index_ordered(result)
@@ -1023,7 +1026,12 @@ def _iterate_slices(self) -> Iterable[Series]:
# Dispatch/Wrapping
@final
- def _concat_objects(self, values, not_indexed_same: bool = False):
+ def _concat_objects(
+ self,
+ values,
+ not_indexed_same: bool = False,
+ override_group_keys: bool = False,
+ ):
from pandas.core.reshape.concat import concat
def reset_identity(values):
@@ -1034,28 +1042,7 @@ def reset_identity(values):
ax._reset_identity()
return values
- if not not_indexed_same:
- result = concat(values, axis=self.axis)
-
- ax = self._selected_obj._get_axis(self.axis)
- if self.dropna:
- labels = self.grouper.group_info[0]
- mask = labels != -1
- ax = ax[mask]
-
- # this is a very unfortunate situation
- # we can't use reindex to restore the original order
- # when the ax has duplicates
- # so we resort to this
- # GH 14776, 30667
- if ax.has_duplicates and not result.axes[self.axis].equals(ax):
- indexer, _ = result.index.get_indexer_non_unique(ax._values)
- indexer = algorithms.unique1d(indexer)
- result = result.take(indexer, axis=self.axis)
- else:
- result = result.reindex(ax, axis=self.axis, copy=False)
-
- elif self.group_keys:
+ if self.group_keys and not override_group_keys:
values = reset_identity(values)
if self.as_index:
@@ -1079,6 +1066,28 @@ def reset_identity(values):
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
+
+ elif not not_indexed_same:
+ result = concat(values, axis=self.axis)
+
+ ax = self._selected_obj._get_axis(self.axis)
+ if self.dropna:
+ labels = self.grouper.group_info[0]
+ mask = labels != -1
+ ax = ax[mask]
+
+ # this is a very unfortunate situation
+ # we can't use reindex to restore the original order
+ # when the ax has duplicates
+ # so we resort to this
+ # GH 14776, 30667
+ if ax.has_duplicates and not result.axes[self.axis].equals(ax):
+ indexer, _ = result.index.get_indexer_non_unique(ax._values)
+ indexer = algorithms.unique1d(indexer)
+ result = result.take(indexer, axis=self.axis)
+ else:
+ result = result.reindex(ax, axis=self.axis, copy=False)
+
else:
values = reset_identity(values)
result = concat(values, axis=self.axis)
@@ -1205,7 +1214,13 @@ def _wrap_transformed_output(
result.index = self.obj.index
return result
- def _wrap_applied_output(self, data, values: list, not_indexed_same: bool = False):
+ def _wrap_applied_output(
+ self,
+ data,
+ values: list,
+ not_indexed_same: bool = False,
+ override_group_keys: bool = False,
+ ):
raise AbstractMethodError(self)
def _resolve_numeric_only(self, numeric_only: bool | lib.NoDefault) -> bool:
@@ -1419,6 +1434,8 @@ def _python_apply_general(
f: Callable,
data: DataFrame | Series,
not_indexed_same: bool | None = None,
+ is_transform: bool = False,
+ is_agg: bool = False,
) -> NDFrameT:
"""
Apply function f in python space
@@ -1433,6 +1450,15 @@ def _python_apply_general(
When specified, overrides the value of not_indexed_same. Apply behaves
differently when the result index is equal to the input index, but
this can be coincidental leading to value-dependent behavior.
+ is_transform : bool, default False
+ Indicator for whether the function is actually a transform
+ and should not have group keys prepended. This is used
+ in _make_wrapper which generates both transforms (e.g. diff)
+ and non-transforms (e.g. corr)
+ is_agg : bool, default False
+ Indicator for whether the function is an aggregation. When the
+ result is empty, we don't want to warn for this case.
+ See _GroupBy._python_agg_general.
Returns
-------
@@ -1440,12 +1466,39 @@ def _python_apply_general(
data after applying f
"""
values, mutated = self.grouper.apply(f, data, self.axis)
-
if not_indexed_same is None:
not_indexed_same = mutated or self.mutated
+ override_group_keys = False
+
+ is_empty_agg = is_agg and len(values) == 0
+ if (not not_indexed_same and self.group_keys is lib.no_default) and not (
+ is_transform or is_empty_agg
+ ):
+ # We've detected value-dependent behavior: the result's index depends on
+ # whether the user's function `f` returned the same index or not.
+ msg = (
+ "Not prepending group keys to the result index of "
+ "transform-like apply. In the future, the group keys "
+ "will be included in the index, regardless of whether "
+ "the applied function returns a like-indexed object.\n"
+ "To preserve the previous behavior, use\n\n\t"
+ ">>> .groupby(..., group_keys=False)\n\n"
+ "To adopt the future behavior and silence this warning, use "
+ "\n\n\t>>> .groupby(..., group_keys=True)"
+ )
+ warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
+ # We want to behave as if `self.group_keys=False` when reconstructing
+ # the object. However, we don't want to mutate the stateful GroupBy
+ # object, so we just override it.
+ # When this deprecation is enforced then override_group_keys
+ # may be removed.
+ override_group_keys = True
return self._wrap_applied_output(
- data, values, not_indexed_same=not_indexed_same
+ data,
+ values,
+ not_indexed_same,
+ override_group_keys=is_transform or override_group_keys,
)
@final
@@ -1458,7 +1511,7 @@ def _python_agg_general(self, func, *args, **kwargs):
if self.ngroups == 0:
# agg_series below assumes ngroups > 0
- return self._python_apply_general(f, self._selected_obj)
+ return self._python_apply_general(f, self._selected_obj, is_agg=True)
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
@@ -2400,7 +2453,11 @@ def ohlc(self) -> DataFrame:
@doc(DataFrame.describe)
def describe(self, **kwargs):
with self._group_selection_context():
- result = self.apply(lambda x: x.describe(**kwargs))
+ result = self._python_apply_general(
+ lambda x: x.describe(**kwargs),
+ self._selected_obj,
+ not_indexed_same=True,
+ )
if self.axis == 1:
return result.T
return result.unstack()
@@ -3284,7 +3341,11 @@ def rank(
if axis != 0:
# DataFrame uses different keyword name
kwargs["method"] = kwargs.pop("ties_method")
- return self.apply(lambda x: x.rank(axis=axis, numeric_only=False, **kwargs))
+ f = lambda x: x.rank(axis=axis, numeric_only=False, **kwargs)
+ result = self._python_apply_general(
+ f, self._selected_obj, is_transform=True
+ )
+ return result
return self._cython_transform(
"rank",
@@ -3306,7 +3367,8 @@ def cumprod(self, axis=0, *args, **kwargs):
"""
nv.validate_groupby_func("cumprod", args, kwargs, ["numeric_only", "skipna"])
if axis != 0:
- return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))
+ f = lambda x: x.cumprod(axis=axis, **kwargs)
+ return self._python_apply_general(f, self._selected_obj, is_transform=True)
return self._cython_transform("cumprod", **kwargs)
@@ -3323,7 +3385,8 @@ def cumsum(self, axis=0, *args, **kwargs):
"""
nv.validate_groupby_func("cumsum", args, kwargs, ["numeric_only", "skipna"])
if axis != 0:
- return self.apply(lambda x: x.cumsum(axis=axis, **kwargs))
+ f = lambda x: x.cumsum(axis=axis, **kwargs)
+ return self._python_apply_general(f, self._selected_obj, is_transform=True)
return self._cython_transform("cumsum", **kwargs)
@@ -3340,7 +3403,8 @@ def cummin(self, axis=0, **kwargs):
"""
skipna = kwargs.get("skipna", True)
if axis != 0:
- return self.apply(lambda x: np.minimum.accumulate(x, axis))
+ f = lambda x: np.minimum.accumulate(x, axis)
+ return self._python_apply_general(f, self._selected_obj, is_transform=True)
return self._cython_transform("cummin", numeric_only=False, skipna=skipna)
@@ -3357,7 +3421,8 @@ def cummax(self, axis=0, **kwargs):
"""
skipna = kwargs.get("skipna", True)
if axis != 0:
- return self.apply(lambda x: np.maximum.accumulate(x, axis))
+ f = lambda x: np.maximum.accumulate(x, axis)
+ return self._python_apply_general(f, self._selected_obj, is_transform=True)
return self._cython_transform("cummax", numeric_only=False, skipna=skipna)
@@ -3538,7 +3603,8 @@ def shift(self, periods=1, freq=None, axis=0, fill_value=None):
if available.
"""
if freq is not None or axis != 0:
- return self.apply(lambda x: x.shift(periods, freq, axis, fill_value))
+ f = lambda x: x.shift(periods, freq, axis, fill_value)
+ return self._python_apply_general(f, self._selected_obj, is_transform=True)
ids, _, ngroups = self.grouper.group_info
res_indexer = np.zeros(len(ids), dtype=np.int64)
@@ -3610,20 +3676,22 @@ def pct_change(self, periods=1, fill_method="ffill", limit=None, freq=None, axis
# TODO(GH#23918): Remove this conditional for SeriesGroupBy when
# GH#23918 is fixed
if freq is not None or axis != 0:
- return self.apply(
- lambda x: x.pct_change(
- periods=periods,
- fill_method=fill_method,
- limit=limit,
- freq=freq,
- axis=axis,
- )
+ f = lambda x: x.pct_change(
+ periods=periods,
+ fill_method=fill_method,
+ limit=limit,
+ freq=freq,
+ axis=axis,
)
+ return self._python_apply_general(f, self._selected_obj, is_transform=True)
+
if fill_method is None: # GH30463
fill_method = "ffill"
limit = 0
filled = getattr(self, fill_method)(limit=limit)
- fill_grp = filled.groupby(self.grouper.codes, axis=self.axis)
+ fill_grp = filled.groupby(
+ self.grouper.codes, axis=self.axis, group_keys=self.group_keys
+ )
shifted = fill_grp.shift(periods=periods, freq=freq, axis=self.axis)
return (filled / shifted) - 1
@@ -3968,7 +4036,7 @@ def get_groupby(
selection=None,
as_index: bool = True,
sort: bool = True,
- group_keys: bool = True,
+ group_keys: bool | lib.NoDefault = True,
squeeze: bool = False,
observed: bool = False,
mutated: bool = False,
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 856e9e8b56930..209433a45f8b2 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -149,6 +149,7 @@ def __init__(
axis: int = 0,
kind=None,
*,
+ group_keys: bool | lib.NoDefault = lib.no_default,
selection=None,
**kwargs,
) -> None:
@@ -158,7 +159,7 @@ def __init__(
self.axis = axis
self.kind = kind
self.squeeze = False
- self.group_keys = True
+ self.group_keys = group_keys
self.as_index = True
self.groupby._set_grouper(self._convert_obj(obj), sort=True)
@@ -409,7 +410,9 @@ def _gotitem(self, key, ndim: int, subset=None):
grouper = self.grouper
if subset is None:
subset = self.obj
- grouped = get_groupby(subset, by=None, grouper=grouper, axis=self.axis)
+ grouped = get_groupby(
+ subset, by=None, grouper=grouper, axis=self.axis, group_keys=self.group_keys
+ )
# try the key selection
try:
@@ -424,8 +427,9 @@ def _groupby_and_aggregate(self, how, *args, **kwargs):
grouper = self.grouper
obj = self._selected_obj
-
- grouped = get_groupby(obj, by=None, grouper=grouper, axis=self.axis)
+ grouped = get_groupby(
+ obj, by=None, grouper=grouper, axis=self.axis, group_keys=self.group_keys
+ )
try:
if isinstance(obj, ABCDataFrame) and callable(how):
@@ -1477,6 +1481,7 @@ def __init__(
base: int | None = None,
origin: str | TimestampConvertibleTypes = "start_day",
offset: TimedeltaConvertibleTypes | None = None,
+ group_keys: bool | lib.NoDefault = True,
**kwargs,
) -> None:
# Check for correctness of the keyword arguments which would
@@ -1525,6 +1530,7 @@ def __init__(
self.how = how
self.fill_method = fill_method
self.limit = limit
+ self.group_keys = group_keys
if origin in ("epoch", "start", "start_day", "end", "end_day"):
self.origin = origin
@@ -1590,11 +1596,17 @@ def _get_resampler(self, obj, kind=None):
ax = self.ax
if isinstance(ax, DatetimeIndex):
- return DatetimeIndexResampler(obj, groupby=self, kind=kind, axis=self.axis)
+ return DatetimeIndexResampler(
+ obj, groupby=self, kind=kind, axis=self.axis, group_keys=self.group_keys
+ )
elif isinstance(ax, PeriodIndex) or kind == "period":
- return PeriodIndexResampler(obj, groupby=self, kind=kind, axis=self.axis)
+ return PeriodIndexResampler(
+ obj, groupby=self, kind=kind, axis=self.axis, group_keys=self.group_keys
+ )
elif isinstance(ax, TimedeltaIndex):
- return TimedeltaIndexResampler(obj, groupby=self, axis=self.axis)
+ return TimedeltaIndexResampler(
+ obj, groupby=self, axis=self.axis, group_keys=self.group_keys
+ )
raise TypeError(
"Only valid with DatetimeIndex, "
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 00384ec26f71d..83c5e8206952c 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1928,7 +1928,7 @@ def groupby(
level=None,
as_index: bool = True,
sort: bool = True,
- group_keys: bool = True,
+ group_keys: bool | lib.NoDefault = no_default,
squeeze: bool | lib.NoDefault = no_default,
observed: bool = False,
dropna: bool = True,
@@ -5561,6 +5561,7 @@ def resample(
level=None,
origin: str | TimestampConvertibleTypes = "start_day",
offset: TimedeltaConvertibleTypes | None = None,
+ group_keys: bool | lib.NoDefault = no_default,
) -> Resampler:
return super().resample(
rule=rule,
@@ -5575,6 +5576,7 @@ def resample(
level=level,
origin=origin,
offset=offset,
+ group_keys=group_keys,
)
def to_timestamp(self, freq=None, how="start", copy=True) -> Series:
diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py
index 0d7b9466a37f0..17b5f0b70d34f 100644
--- a/pandas/core/shared_docs.py
+++ b/pandas/core/shared_docs.py
@@ -113,8 +113,17 @@
Sort group keys. Get better performance by turning this off.
Note this does not influence the order of observations within each
group. Groupby preserves the order of rows within each group.
-group_keys : bool, default True
+group_keys : bool, optional
When calling apply, add group keys to index to identify pieces.
+ By default group keys are not included when the result's index
+ (and column) labels match the inputs, and are included otherwise.
+
+ .. versionchanged:: 1.5.0
+
+ Warns that `group_keys` will no longer be ignored when the
+ result from ``apply`` is a like-indexed Series or DataFrame.
+ Specify ``group_keys`` explicitly to include the group keys or
+ not.
squeeze : bool, default False
Reduce the dimensionality of the return type if possible,
otherwise return a consistent type.
diff --git a/pandas/tests/extension/base/groupby.py b/pandas/tests/extension/base/groupby.py
index db0190d488d42..336865d32167d 100644
--- a/pandas/tests/extension/base/groupby.py
+++ b/pandas/tests/extension/base/groupby.py
@@ -68,10 +68,10 @@ def test_groupby_extension_transform(self, data_for_grouping):
def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
- df.groupby("B").apply(groupby_apply_op)
- df.groupby("B").A.apply(groupby_apply_op)
- df.groupby("A").apply(groupby_apply_op)
- df.groupby("A").B.apply(groupby_apply_op)
+ df.groupby("B", group_keys=False).apply(groupby_apply_op)
+ df.groupby("B", group_keys=False).A.apply(groupby_apply_op)
+ df.groupby("A", group_keys=False).apply(groupby_apply_op)
+ df.groupby("A", group_keys=False).B.apply(groupby_apply_op)
def test_groupby_apply_identity(self, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py
index 710e83c0c48a4..e45bffba944c0 100644
--- a/pandas/tests/extension/test_boolean.py
+++ b/pandas/tests/extension/test_boolean.py
@@ -314,10 +314,10 @@ def test_groupby_extension_transform(self, data_for_grouping):
def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": data_for_grouping})
- df.groupby("B").apply(groupby_apply_op)
- df.groupby("B").A.apply(groupby_apply_op)
- df.groupby("A").apply(groupby_apply_op)
- df.groupby("A").B.apply(groupby_apply_op)
+ df.groupby("B", group_keys=False).apply(groupby_apply_op)
+ df.groupby("B", group_keys=False).A.apply(groupby_apply_op)
+ df.groupby("A", group_keys=False).apply(groupby_apply_op)
+ df.groupby("A", group_keys=False).B.apply(groupby_apply_op)
def test_groupby_apply_identity(self, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": data_for_grouping})
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py
index ee181101a181a..148059a6a16f3 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/test_numpy.py
@@ -227,16 +227,7 @@ def test_getitem_scalar(self, data):
class TestGroupby(BaseNumPyTests, base.BaseGroupbyTests):
- def test_groupby_extension_apply(
- self, data_for_grouping, groupby_apply_op, request
- ):
- dummy = groupby_apply_op([None])
- if (
- isinstance(dummy, pd.Series)
- and data_for_grouping.dtype.numpy_dtype == object
- ):
- mark = pytest.mark.xfail(reason="raises in MultiIndex construction")
- request.node.add_marker(mark)
+ def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op):
super().test_groupby_extension_apply(data_for_grouping, groupby_apply_op)
diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py
index 431029c407afc..9efc2bf53439a 100644
--- a/pandas/tests/generic/test_finalize.py
+++ b/pandas/tests/generic/test_finalize.py
@@ -748,7 +748,7 @@ def test_categorical_accessor(method):
)
def test_groupby_finalize(obj, method):
obj.attrs = {"a": 1}
- result = method(obj.groupby([0, 0]))
+ result = method(obj.groupby([0, 0], group_keys=False))
assert result.attrs == {"a": 1}
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py
index 1ea44871eea4d..cae3bdf1a8f86 100644
--- a/pandas/tests/groupby/aggregate/test_aggregate.py
+++ b/pandas/tests/groupby/aggregate/test_aggregate.py
@@ -133,7 +133,7 @@ def test_groupby_aggregation_multi_level_column():
def test_agg_apply_corner(ts, tsframe):
# nothing to group, all NA
- grouped = ts.groupby(ts * np.nan)
+ grouped = ts.groupby(ts * np.nan, group_keys=False)
assert ts.dtype == np.float64
# groupby float64 values results in Float64Index
@@ -143,7 +143,7 @@ def test_agg_apply_corner(ts, tsframe):
tm.assert_series_equal(grouped.apply(np.sum), exp, check_index_type=False)
# DataFrame
- grouped = tsframe.groupby(tsframe["A"] * np.nan)
+ grouped = tsframe.groupby(tsframe["A"] * np.nan, group_keys=False)
exp_df = DataFrame(
columns=tsframe.columns,
dtype=float,
@@ -914,7 +914,7 @@ def test_groupby_aggregate_empty_key_empty_return():
def test_groupby_aggregate_empty_with_multiindex_frame():
# GH 39178
df = DataFrame(columns=["a", "b", "c"])
- result = df.groupby(["a", "b"]).agg(d=("c", list))
+ result = df.groupby(["a", "b"], group_keys=False).agg(d=("c", list))
expected = DataFrame(
columns=["d"], index=MultiIndex([[], []], [[], []], names=["a", "b"])
)
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index ba8b77f8acec3..b2de4a8144ff9 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -54,7 +54,9 @@ def test_apply_issues():
["2011.05.16", "2011.05.17", "2011.05.18"], dtype=object, name="date"
)
expected = Series(["00:00", "02:00", "02:00"], index=exp_idx)
- result = df.groupby("date").apply(lambda x: x["time"][x["value"].idxmax()])
+ result = df.groupby("date", group_keys=False).apply(
+ lambda x: x["time"][x["value"].idxmax()]
+ )
tm.assert_series_equal(result, expected)
@@ -80,7 +82,9 @@ def test_apply_trivial_fail():
columns=["key", "data"],
)
expected = pd.concat([df, df], axis=1, keys=["float64", "object"])
- result = df.groupby([str(x) for x in df.dtypes], axis=1).apply(lambda x: df)
+ result = df.groupby([str(x) for x in df.dtypes], axis=1, group_keys=True).apply(
+ lambda x: df
+ )
tm.assert_frame_equal(result, expected)
@@ -156,7 +160,7 @@ def f_constant_df(group):
for func in [f_copy, f_nocopy, f_scalar, f_none, f_constant_df]:
del names[:]
- df.groupby("a").apply(func)
+ df.groupby("a", group_keys=False).apply(func)
assert names == group_names
@@ -174,7 +178,9 @@ def test_group_apply_once_per_group2(capsys):
index=["0", "2", "4", "6", "8", "10", "12", "14"],
)
- df.groupby("group_by_column").apply(lambda df: print("function_called"))
+ df.groupby("group_by_column", group_keys=False).apply(
+ lambda df: print("function_called")
+ )
result = capsys.readouterr().out.count("function_called")
# If `groupby` behaves unexpectedly, this test will break
@@ -194,8 +200,8 @@ def slow(group):
def fast(group):
return group.copy()
- fast_df = df.groupby("A").apply(fast)
- slow_df = df.groupby("A").apply(slow)
+ fast_df = df.groupby("A", group_keys=False).apply(fast)
+ slow_df = df.groupby("A", group_keys=False).apply(slow)
tm.assert_frame_equal(fast_df, slow_df)
@@ -217,7 +223,7 @@ def test_groupby_apply_identity_maybecopy_index_identical(func):
df = DataFrame({"g": [1, 2, 2, 2], "a": [1, 2, 3, 4], "b": [5, 6, 7, 8]})
- result = df.groupby("g").apply(func)
+ result = df.groupby("g", group_keys=False).apply(func)
tm.assert_frame_equal(result, df)
@@ -274,7 +280,7 @@ def test_groupby_as_index_apply():
ind = Index(list("abcde"))
df = DataFrame([[1, 2], [2, 3], [1, 4], [1, 5], [2, 6]], index=ind)
- res = df.groupby(0, as_index=False).apply(lambda x: x).index
+ res = df.groupby(0, as_index=False, group_keys=False).apply(lambda x: x).index
tm.assert_index_equal(res, ind)
@@ -324,7 +330,7 @@ def f(piece):
dr = bdate_range("1/1/2000", periods=100)
ts = Series(np.random.randn(100), index=dr)
- grouped = ts.groupby(lambda x: x.month)
+ grouped = ts.groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(f)
assert isinstance(result, DataFrame)
@@ -388,7 +394,7 @@ def trans2(group):
def test_apply_transform(ts):
- grouped = ts.groupby(lambda x: x.month)
+ grouped = ts.groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
tm.assert_series_equal(result, expected)
@@ -405,12 +411,18 @@ def f(group):
tm.assert_frame_equal(result.loc[key], f(group))
-def test_apply_chunk_view():
+@pytest.mark.parametrize("group_keys", [True, False])
+def test_apply_chunk_view(group_keys):
# Low level tinkering could be unsafe, make sure not
df = DataFrame({"key": [1, 1, 1, 2, 2, 2, 3, 3, 3], "value": range(9)})
- result = df.groupby("key", group_keys=False).apply(lambda x: x.iloc[:2])
+ result = df.groupby("key", group_keys=group_keys).apply(lambda x: x.iloc[:2])
expected = df.take([0, 1, 3, 4, 6, 7])
+ if group_keys:
+ expected.index = MultiIndex.from_arrays(
+ [[1, 1, 2, 2, 3, 3], expected.index], names=["key", None]
+ )
+
tm.assert_frame_equal(result, expected)
@@ -442,7 +454,7 @@ def f(group):
group["v2"] = (v - v.min()) / (v.max() - v.min())
return group
- result = df.groupby("d").apply(f)
+ result = df.groupby("d", group_keys=False).apply(f)
expected = df.copy()
expected["v2"] = np.tile([0.0, 0.5, 1], 2)
@@ -466,7 +478,7 @@ def f(group):
group["v2"] = (v - v.min()) / (v.max() - v.min())
return group
- result = df.groupby("d").apply(f)
+ result = df.groupby("d", group_keys=False).apply(f)
expected = df.copy()
expected["v2"] = np.tile([0.0, 0.5, 1], 2)
@@ -475,7 +487,7 @@ def f(group):
def test_apply_corner(tsframe):
- result = tsframe.groupby(lambda x: x.year).apply(lambda x: x * 2)
+ result = tsframe.groupby(lambda x: x.year, group_keys=False).apply(lambda x: x * 2)
expected = tsframe * 2
tm.assert_frame_equal(result, expected)
@@ -517,14 +529,14 @@ def test_apply_with_duplicated_non_sorted_axis(test_series):
)
if test_series:
ser = df.set_index("Y")["X"]
- result = ser.groupby(level=0).apply(lambda x: x)
+ result = ser.groupby(level=0, group_keys=False).apply(lambda x: x)
# not expecting the order to remain the same for duplicated axis
result = result.sort_index()
expected = ser.sort_index()
tm.assert_series_equal(result, expected)
else:
- result = df.groupby("Y").apply(lambda x: x)
+ result = df.groupby("Y", group_keys=False).apply(lambda x: x)
# not expecting the order to remain the same for duplicated axis
result = result.sort_values("Y")
@@ -546,7 +558,7 @@ def reindex_helper(x):
return x.reindex(np.arange(x.index.min(), x.index.max() + 1))
# the following group by raised a ValueError
- result = df.groupby("group").value.apply(reindex_helper)
+ result = df.groupby("group", group_keys=False).value.apply(reindex_helper)
tm.assert_series_equal(expected, result)
@@ -563,7 +575,7 @@ def test_apply_corner_cases():
}
)
- grouped = df.groupby("key")
+ grouped = df.groupby("key", group_keys=False)
def f(g):
g["value3"] = g["value1"] * 2
@@ -774,7 +786,7 @@ def test_groupby_apply_return_empty_chunk():
def test_apply_with_mixed_types():
# gh-20949
df = DataFrame({"A": "a a b".split(), "B": [1, 2, 3], "C": [4, 6, 5]})
- g = df.groupby("A")
+ g = df.groupby("A", group_keys=False)
result = g.transform(lambda x: x / x.sum())
expected = DataFrame({"B": [1 / 3.0, 2 / 3.0, 1], "C": [0.4, 0.6, 1.0]})
@@ -901,7 +913,7 @@ def test_groupby_apply_datetime_result_dtypes():
def test_apply_index_has_complex_internals(index):
# GH 31248
df = DataFrame({"group": [1, 1, 2], "value": [0, 1, 0]}, index=index)
- result = df.groupby("group").apply(lambda x: x)
+ result = df.groupby("group", group_keys=False).apply(lambda x: x)
tm.assert_frame_equal(result, df)
@@ -969,6 +981,55 @@ def test_apply_function_with_indexing_return_column():
tm.assert_frame_equal(result, expected)
+@pytest.mark.parametrize(
+ "udf",
+ [(lambda x: x.copy()), (lambda x: x.copy().rename(lambda y: y + 1))],
+)
+@pytest.mark.parametrize("group_keys", [True, False])
+def test_apply_result_type(group_keys, udf):
+ # https://github.com/pandas-dev/pandas/issues/34809
+ # We'd like to control whether the group keys end up in the index
+ # regardless of whether the UDF happens to be a transform.
+ df = DataFrame({"A": ["a", "b"], "B": [1, 2]})
+ df_result = df.groupby("A", group_keys=group_keys).apply(udf)
+ series_result = df.B.groupby(df.A, group_keys=group_keys).apply(udf)
+
+ if group_keys:
+ assert df_result.index.nlevels == 2
+ assert series_result.index.nlevels == 2
+ else:
+ assert df_result.index.nlevels == 1
+ assert series_result.index.nlevels == 1
+
+
+def test_result_order_group_keys_false():
+ # GH 34998
+ # apply result order should not depend on whether index is the same or just equal
+ df = DataFrame({"A": [2, 1, 2], "B": [1, 2, 3]})
+ result = df.groupby("A", group_keys=False).apply(lambda x: x)
+ expected = df.groupby("A", group_keys=False).apply(lambda x: x.copy())
+ tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_apply_group_keys_warns():
+ df = DataFrame({"A": [0, 1, 1], "B": [1, 2, 3]})
+ msg = "Not prepending group keys to the result index"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby("A").apply(lambda x: x)
+
+ tm.assert_frame_equal(result, df)
+
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df.groupby("A")["B"].apply(lambda x: x)
+
+ tm.assert_series_equal(result, df["B"])
+
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ result = df["B"].groupby(df["A"]).apply(lambda x: x)
+
+ tm.assert_series_equal(result, df["B"])
+
+
def test_apply_with_timezones_aware():
# GH: 27212
dates = ["2001-01-01"] * 2 + ["2001-01-02"] * 2 + ["2001-01-03"] * 2
@@ -1073,7 +1134,7 @@ def test_apply_dropna_with_indexed_same(dropna):
},
index=list("xxyxz"),
)
- result = df.groupby("group", dropna=dropna).apply(lambda x: x)
+ result = df.groupby("group", dropna=dropna, group_keys=False).apply(lambda x: x)
expected = df.dropna() if dropna else df.iloc[[0, 3, 1, 2, 4]]
tm.assert_frame_equal(result, expected)
@@ -1128,9 +1189,9 @@ def test_positional_slice_groups_datetimelike():
"let": list("abcde"),
}
)
- result = expected.groupby([expected.let, expected.date.dt.date]).apply(
- lambda x: x.iloc[0:]
- )
+ result = expected.groupby(
+ [expected.let, expected.date.dt.date], group_keys=False
+ ).apply(lambda x: x.iloc[0:])
tm.assert_frame_equal(result, expected)
@@ -1245,7 +1306,7 @@ def test_apply_index_key_error_bug(index_values):
def test_apply_nonmonotonic_float_index(arg, idx):
# GH 34455
expected = DataFrame({"col": arg}, index=idx)
- result = expected.groupby("col").apply(lambda x: x)
+ result = expected.groupby("col", group_keys=False).apply(lambda x: x)
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_apply_mutate.py b/pandas/tests/groupby/test_apply_mutate.py
index 01fe7512c0fe9..36e117cf03353 100644
--- a/pandas/tests/groupby/test_apply_mutate.py
+++ b/pandas/tests/groupby/test_apply_mutate.py
@@ -13,8 +13,10 @@ def test_group_by_copy():
}
).set_index("name")
- grp_by_same_value = df.groupby(["age"]).apply(lambda group: group)
- grp_by_copy = df.groupby(["age"]).apply(lambda group: group.copy())
+ grp_by_same_value = df.groupby(["age"], group_keys=False).apply(lambda group: group)
+ grp_by_copy = df.groupby(["age"], group_keys=False).apply(
+ lambda group: group.copy()
+ )
tm.assert_frame_equal(grp_by_same_value, grp_by_copy)
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 7440b63e78b65..42cce74c5c01d 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -545,7 +545,7 @@ def test_groupby_cumprod():
df = DataFrame({"key": ["b"] * 10, "value": 2})
actual = df.groupby("key")["value"].cumprod()
- expected = df.groupby("key")["value"].apply(lambda x: x.cumprod())
+ expected = df.groupby("key", group_keys=False)["value"].apply(lambda x: x.cumprod())
expected.name = "value"
tm.assert_series_equal(actual, expected)
@@ -554,7 +554,7 @@ def test_groupby_cumprod():
# if overflows, groupby product casts to float
# while numpy passes back invalid values
df["value"] = df["value"].astype(float)
- expected = df.groupby("key")["value"].apply(lambda x: x.cumprod())
+ expected = df.groupby("key", group_keys=False)["value"].apply(lambda x: x.cumprod())
expected.name = "value"
tm.assert_series_equal(actual, expected)
@@ -734,7 +734,7 @@ def test_cummin(dtypes_for_minmax):
expected = DataFrame({"B": expected_mins}).astype(dtype)
result = df.groupby("A").cummin()
tm.assert_frame_equal(result, expected)
- result = df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
+ result = df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# Test w/ min value for dtype
@@ -744,7 +744,9 @@ def test_cummin(dtypes_for_minmax):
expected.loc[[1, 5], "B"] = min_val + 1 # should not be rounded to min_val
result = df.groupby("A").cummin()
tm.assert_frame_equal(result, expected, check_exact=True)
- expected = df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
+ expected = (
+ df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame()
+ )
tm.assert_frame_equal(result, expected, check_exact=True)
# Test nan in some values
@@ -752,7 +754,9 @@ def test_cummin(dtypes_for_minmax):
expected = DataFrame({"B": [np.nan, 4, np.nan, 2, np.nan, 3, np.nan, 1]})
result = base_df.groupby("A").cummin()
tm.assert_frame_equal(result, expected)
- expected = base_df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
+ expected = (
+ base_df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame()
+ )
tm.assert_frame_equal(result, expected)
# GH 15561
@@ -797,7 +801,7 @@ def test_cummax(dtypes_for_minmax):
expected = DataFrame({"B": expected_maxs}).astype(dtype)
result = df.groupby("A").cummax()
tm.assert_frame_equal(result, expected)
- result = df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
+ result = df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test w/ max value for dtype
@@ -805,7 +809,9 @@ def test_cummax(dtypes_for_minmax):
expected.loc[[2, 3, 6, 7], "B"] = max_val
result = df.groupby("A").cummax()
tm.assert_frame_equal(result, expected)
- expected = df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
+ expected = (
+ df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame()
+ )
tm.assert_frame_equal(result, expected)
# Test nan in some values
@@ -813,7 +819,9 @@ def test_cummax(dtypes_for_minmax):
expected = DataFrame({"B": [np.nan, 4, np.nan, 4, np.nan, 3, np.nan, 3]})
result = base_df.groupby("A").cummax()
tm.assert_frame_equal(result, expected)
- expected = base_df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
+ expected = (
+ base_df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame()
+ )
tm.assert_frame_equal(result, expected)
# GH 15561
@@ -1015,6 +1023,11 @@ def test_frame_describe_multikey(tsframe):
groupedT = tsframe.groupby({"A": 0, "B": 0, "C": 1, "D": 1}, axis=1)
result = groupedT.describe()
expected = tsframe.describe().T
+ # reverting the change from https://github.com/pandas-dev/pandas/pull/35441/
+ expected.index = MultiIndex(
+ levels=[[0, 1], expected.index],
+ codes=[[0, 0, 1, 1], range(len(expected.index))],
+ )
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 7bf63bb3c2cac..97e388cd074c3 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -44,7 +44,7 @@ def test_basic(dtype):
np.random.shuffle(index)
data = data.reindex(index)
- grouped = data.groupby(lambda x: x // 3)
+ grouped = data.groupby(lambda x: x // 3, group_keys=False)
for k, v in grouped:
assert len(v) == 3
@@ -637,7 +637,9 @@ def test_as_index_select_column():
expected = Series([2, 4], name="B")
tm.assert_series_equal(result, expected)
- result = df.groupby("A", as_index=False)["B"].apply(lambda x: x.cumsum())
+ result = df.groupby("A", as_index=False, group_keys=True)["B"].apply(
+ lambda x: x.cumsum()
+ )
expected = Series(
[2, 6, 6], name="B", index=MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)])
)
@@ -1472,7 +1474,7 @@ def test_dont_clobber_name_column():
{"key": ["a", "a", "a", "b", "b", "b"], "name": ["foo", "bar", "baz"] * 2}
)
- result = df.groupby("key").apply(lambda x: x)
+ result = df.groupby("key", group_keys=False).apply(lambda x: x)
tm.assert_frame_equal(result, df)
@@ -1544,7 +1546,7 @@ def freduce(group):
def foo(x):
return freduce(x)
- grouped = df.groupby(grouper)
+ grouped = df.groupby(grouper, group_keys=False)
# make sure all these work
grouped.apply(f)
@@ -1690,13 +1692,15 @@ def test_groupby_multiindex_not_lexsorted():
for level in [0, 1, [0, 1]]:
for sort in [False, True]:
- result = df.groupby(level=level, sort=sort).apply(DataFrame.drop_duplicates)
+ result = df.groupby(level=level, sort=sort, group_keys=False).apply(
+ DataFrame.drop_duplicates
+ )
expected = df
tm.assert_frame_equal(expected, result)
result = (
df.sort_index()
- .groupby(level=level, sort=sort)
+ .groupby(level=level, sort=sort, group_keys=False)
.apply(DataFrame.drop_duplicates)
)
expected = df.sort_index()
@@ -1911,7 +1915,7 @@ def test_empty_groupby(columns, keys, values, method, op, request, using_array_m
df = df.iloc[:0]
- gb = df.groupby(keys)[columns]
+ gb = df.groupby(keys, group_keys=False)[columns]
def get_result():
if method == "attr":
@@ -2032,7 +2036,7 @@ def test_empty_groupby_apply_nonunique_columns():
df = DataFrame(np.random.randn(0, 4))
df[3] = df[3].astype(np.int64)
df.columns = [0, 1, 2, 0]
- gb = df.groupby(df[1])
+ gb = df.groupby(df[1], group_keys=False)
res = gb.apply(lambda x: x)
assert (res.dtypes == df.dtypes).all()
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index efb0b82f58e97..c6e4bec3f7b2c 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -162,10 +162,10 @@ def test_grouper_index_types(self):
]:
df.index = index(len(df))
- df.groupby(list("abcde")).apply(lambda x: x)
+ df.groupby(list("abcde"), group_keys=False).apply(lambda x: x)
df.index = list(reversed(df.index.tolist()))
- df.groupby(list("abcde")).apply(lambda x: x)
+ df.groupby(list("abcde"), group_keys=False).apply(lambda x: x)
def test_grouper_multilevel_freq(self):
@@ -669,7 +669,7 @@ def test_evaluate_with_empty_groups(self, func, expected):
# (not testing other agg fns, because they return
# different index objects.
df = DataFrame({1: [], 2: []})
- g = df.groupby(1)
+ g = df.groupby(1, group_keys=False)
result = getattr(g[2], func)(lambda x: x)
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py
index d4b21633309db..7c9d6e7a73087 100644
--- a/pandas/tests/groupby/test_timegrouper.py
+++ b/pandas/tests/groupby/test_timegrouper.py
@@ -593,7 +593,7 @@ def test_groupby_multi_timezone(self):
4,2000-01-01 16:50:00,America/New_York"""
df = pd.read_csv(StringIO(data), header=None, names=["value", "date", "tz"])
- result = df.groupby("tz").date.apply(
+ result = df.groupby("tz", group_keys=False).date.apply(
lambda x: pd.to_datetime(x).dt.tz_localize(x.name)
)
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py
index 3042e38d9014c..c210c79c29426 100644
--- a/pandas/tests/groupby/transform/test_transform.py
+++ b/pandas/tests/groupby/transform/test_transform.py
@@ -67,7 +67,7 @@ def demean(arr):
)
key = ["one", "two", "one", "two", "one"]
result = people.groupby(key).transform(demean).groupby(key).mean()
- expected = people.groupby(key).apply(demean).groupby(key).mean()
+ expected = people.groupby(key, group_keys=False).apply(demean).groupby(key).mean()
tm.assert_frame_equal(result, expected)
# GH 8430
@@ -228,26 +228,26 @@ def test_transform_axis_ts(tsframe):
)
# monotonic
ts = tso
- grouped = ts.groupby(lambda x: x.weekday())
+ grouped = ts.groupby(lambda x: x.weekday(), group_keys=False)
result = ts - grouped.transform("mean")
expected = grouped.apply(lambda x: x - x.mean())
tm.assert_frame_equal(result, expected)
ts = ts.T
- grouped = ts.groupby(lambda x: x.weekday(), axis=1)
+ grouped = ts.groupby(lambda x: x.weekday(), axis=1, group_keys=False)
result = ts - grouped.transform("mean")
expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
tm.assert_frame_equal(result, expected)
# non-monotonic
ts = tso.iloc[[1, 0] + list(range(2, len(base)))]
- grouped = ts.groupby(lambda x: x.weekday())
+ grouped = ts.groupby(lambda x: x.weekday(), group_keys=False)
result = ts - grouped.transform("mean")
expected = grouped.apply(lambda x: x - x.mean())
tm.assert_frame_equal(result, expected)
ts = ts.T
- grouped = ts.groupby(lambda x: x.weekday(), axis=1)
+ grouped = ts.groupby(lambda x: x.weekday(), axis=1, group_keys=False)
result = ts - grouped.transform("mean")
expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
tm.assert_frame_equal(result, expected)
@@ -753,7 +753,7 @@ def test_cython_transform_frame(op, args, targop):
]: # {"by": 'string_missing'}]:
# {"by": ['int','string']}]:
- gb = df.groupby(**gb_target)
+ gb = df.groupby(group_keys=False, **gb_target)
# allowlisted methods set the selection before applying
# bit a of hack to make sure the cythonized shift
# is equivalent to pre 0.17.1 behavior
diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py
index e71216b261d95..1a25749808820 100644
--- a/pandas/tests/resample/test_base.py
+++ b/pandas/tests/resample/test_base.py
@@ -164,7 +164,7 @@ def test_resample_empty_dataframe(empty_frame_dti, freq, resample_method):
# GH13212
df = empty_frame_dti
# count retains dimensions too
- result = getattr(df.resample(freq), resample_method)()
+ result = getattr(df.resample(freq, group_keys=False), resample_method)()
if resample_method != "size":
expected = df.copy()
else:
@@ -220,7 +220,7 @@ def test_resample_empty_dtypes(index, dtype, resample_method):
# them to ensure they no longer do. (GH #10228)
empty_series_dti = Series([], index, dtype)
try:
- getattr(empty_series_dti.resample("d"), resample_method)()
+ getattr(empty_series_dti.resample("d", group_keys=False), resample_method)()
except DataError:
# Ignore these since some combinations are invalid
# (ex: doing mean with dtype of np.object_)
@@ -232,7 +232,7 @@ def test_resample_empty_dtypes(index, dtype, resample_method):
def test_apply_to_empty_series(empty_series_dti, freq):
# GH 14313
ser = empty_series_dti
- result = ser.resample(freq).apply(lambda x: 1)
+ result = ser.resample(freq, group_keys=False).apply(lambda x: 1)
expected = ser.resample(freq).apply(np.sum)
tm.assert_series_equal(result, expected, check_dtype=False)
diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py
index 86e0411ee3334..9148600d31bc2 100644
--- a/pandas/tests/resample/test_resample_api.py
+++ b/pandas/tests/resample/test_resample_api.py
@@ -94,6 +94,31 @@ def test_groupby_resample_on_api():
tm.assert_frame_equal(result, expected)
+def test_resample_group_keys():
+ df = DataFrame({"A": 1, "B": 2}, index=date_range("2000", periods=10))
+ g = df.resample("5D")
+ expected = df.copy()
+ with tm.assert_produces_warning(FutureWarning, match="Not prepending group keys"):
+ result = g.apply(lambda x: x)
+ tm.assert_frame_equal(result, expected)
+
+ # no warning
+ g = df.resample("5D", group_keys=False)
+ with tm.assert_produces_warning(None):
+ result = g.apply(lambda x: x)
+ tm.assert_frame_equal(result, expected)
+
+ # no warning, group keys
+ expected.index = pd.MultiIndex.from_arrays(
+ [pd.to_datetime(["2000-01-01", "2000-01-06"]).repeat(5), expected.index]
+ )
+
+ g = df.resample("5D", group_keys=True)
+ with tm.assert_produces_warning(None):
+ result = g.apply(lambda x: x)
+ tm.assert_frame_equal(result, expected)
+
+
def test_pipe(test_frame):
# GH17905
@@ -275,7 +300,10 @@ def test_fillna():
@pytest.mark.parametrize(
"func",
- [lambda x: x.resample("20min"), lambda x: x.groupby(pd.Grouper(freq="20min"))],
+ [
+ lambda x: x.resample("20min", group_keys=False),
+ lambda x: x.groupby(pd.Grouper(freq="20min"), group_keys=False),
+ ],
ids=["resample", "groupby"],
)
def test_apply_without_aggregation(func):
@@ -285,6 +313,12 @@ def test_apply_without_aggregation(func):
tm.assert_series_equal(result, test_series)
+def test_apply_without_aggregation2():
+ grouped = test_series.to_frame(name="foo").resample("20min", group_keys=False)
+ result = grouped["foo"].apply(lambda x: x)
+ tm.assert_series_equal(result, test_series.rename("foo"))
+
+
def test_agg_consistency():
# make sure that we are consistent across
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 674b86687a7ca..7e428821a2d50 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -89,7 +89,7 @@ def test_groupby_transform(self, multiindex_dataframe_random_data):
s = frame["A"]
grouper = s.index.get_level_values(0)
- grouped = s.groupby(grouper)
+ grouped = s.groupby(grouper, group_keys=False)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
diff --git a/pandas/tests/window/test_groupby.py b/pandas/tests/window/test_groupby.py
index 90b9288b77690..b4d0f6562f2d5 100644
--- a/pandas/tests/window/test_groupby.py
+++ b/pandas/tests/window/test_groupby.py
@@ -105,7 +105,7 @@ def test_getitem_multiple(self, roll_frame):
],
)
def test_rolling(self, f, roll_frame):
- g = roll_frame.groupby("A")
+ g = roll_frame.groupby("A", group_keys=False)
r = g.rolling(window=4)
result = getattr(r, f)()
@@ -119,7 +119,7 @@ def test_rolling(self, f, roll_frame):
@pytest.mark.parametrize("f", ["std", "var"])
def test_rolling_ddof(self, f, roll_frame):
- g = roll_frame.groupby("A")
+ g = roll_frame.groupby("A", group_keys=False)
r = g.rolling(window=4)
result = getattr(r, f)(ddof=1)
@@ -135,7 +135,7 @@ def test_rolling_ddof(self, f, roll_frame):
"interpolation", ["linear", "lower", "higher", "midpoint", "nearest"]
)
def test_rolling_quantile(self, interpolation, roll_frame):
- g = roll_frame.groupby("A")
+ g = roll_frame.groupby("A", group_keys=False)
r = g.rolling(window=4)
result = r.quantile(0.4, interpolation=interpolation)
@@ -240,7 +240,7 @@ def test_rolling_corr_cov_unordered(self, func, expected_values):
tm.assert_frame_equal(result, expected)
def test_rolling_apply(self, raw, roll_frame):
- g = roll_frame.groupby("A")
+ g = roll_frame.groupby("A", group_keys=False)
r = g.rolling(window=4)
# reduction
@@ -787,7 +787,7 @@ def test_groupby_rolling_resulting_multiindex3(self):
def test_groupby_rolling_object_doesnt_affect_groupby_apply(self, roll_frame):
# GH 39732
- g = roll_frame.groupby("A")
+ g = roll_frame.groupby("A", group_keys=False)
expected = g.apply(lambda x: x.rolling(4).sum()).index
_ = g.rolling(window=4)
result = g.apply(lambda x: x.rolling(4).sum()).index
@@ -936,7 +936,7 @@ def setup_method(self):
"f", ["sum", "mean", "min", "max", "count", "kurt", "skew"]
)
def test_expanding(self, f):
- g = self.frame.groupby("A")
+ g = self.frame.groupby("A", group_keys=False)
r = g.expanding()
result = getattr(r, f)()
@@ -950,7 +950,7 @@ def test_expanding(self, f):
@pytest.mark.parametrize("f", ["std", "var"])
def test_expanding_ddof(self, f):
- g = self.frame.groupby("A")
+ g = self.frame.groupby("A", group_keys=False)
r = g.expanding()
result = getattr(r, f)(ddof=0)
@@ -966,7 +966,7 @@ def test_expanding_ddof(self, f):
"interpolation", ["linear", "lower", "higher", "midpoint", "nearest"]
)
def test_expanding_quantile(self, interpolation):
- g = self.frame.groupby("A")
+ g = self.frame.groupby("A", group_keys=False)
r = g.expanding()
result = r.quantile(0.4, interpolation=interpolation)
@@ -1009,7 +1009,7 @@ def func(x):
tm.assert_series_equal(result, expected)
def test_expanding_apply(self, raw):
- g = self.frame.groupby("A")
+ g = self.frame.groupby("A", group_keys=False)
r = g.expanding()
# reduction
@@ -1052,12 +1052,10 @@ def test_methods(self, method, expected_data):
with tm.assert_produces_warning(FutureWarning, match="nuisance"):
# GH#42738
- expected = df.groupby("A").apply(
+ expected = df.groupby("A", group_keys=True).apply(
lambda x: getattr(x.ewm(com=1.0), method)()
)
-
- # There may be a bug in the above statement; not returning the correct index
- tm.assert_frame_equal(result.reset_index(drop=True), expected)
+ tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"method, expected_data",
@@ -1129,13 +1127,10 @@ def test_times_vs_apply(self, times_frame):
with tm.assert_produces_warning(FutureWarning, match="nuisance"):
# GH#42738
result = times_frame.groupby("A").ewm(halflife=halflife, times="C").mean()
- expected = (
- times_frame.groupby("A")
- .apply(lambda x: x.ewm(halflife=halflife, times="C").mean())
- .iloc[[0, 3, 6, 9, 1, 4, 7, 2, 5, 8]]
- .reset_index(drop=True)
+ expected = times_frame.groupby("A", group_keys=True).apply(
+ lambda x: x.ewm(halflife=halflife, times="C").mean()
)
- tm.assert_frame_equal(result.reset_index(drop=True), expected)
+ tm.assert_frame_equal(result, expected)
def test_times_array(self, times_frame):
# GH 40951
| Currently we determine whether to include the group keys as a level of the result's MultiIndex by looking at whether the result's index matches the original DataFrame. This provides a keyword to control that behavior so that users can consistently get the group keys or not, regardless of whether the udf happens to be a transform or not. The default behavior remains the same.
I called the keyword `result_group_keys`, but would welcome alternatives.
Closes #34809
Closes #31612
Closes #14927
Closes #13056
Closes #27212
Closes #9704
cc @WillAyd and @jorisvandenbossche from the issue. | https://api.github.com/repos/pandas-dev/pandas/pulls/34998 | 2020-06-25T21:10:36Z | 2022-03-30T15:43:39Z | 2022-03-30T15:43:37Z | 2022-10-29T13:49:07Z |
TST: df.loc[:, 'col'] returning a view, but df.loc[df.index, 'col'] returning a copy | diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index 47980e88f76d4..30b13b6ea9fce 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -894,6 +894,22 @@ def test_identity_slice_returns_new_object(self):
original_series[:3] = [7, 8, 9]
assert all(sliced_series[:3] == [7, 8, 9])
+ def test_loc_copy_vs_view(self):
+ # GH 15631
+ x = DataFrame(zip(range(3), range(3)), columns=["a", "b"])
+
+ y = x.copy()
+ q = y.loc[:, "a"]
+ q += 2
+
+ tm.assert_frame_equal(x, y)
+
+ z = x.copy()
+ q = z.loc[x.index, "a"]
+ q += 2
+
+ tm.assert_frame_equal(x, z)
+
def test_loc_uint64(self):
# GH20722
# Test whether loc accept uint64 max value as index.
| - [x] closes #15631
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/34996 | 2020-06-25T17:51:05Z | 2020-07-09T23:41:56Z | 2020-07-09T23:41:56Z | 2020-07-09T23:42:01Z |
DOC: Add example of NonFixedVariableWindowIndexer usage | diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index 897e5d5fb0e24..3a524996ea6d9 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -597,6 +597,18 @@ You can view other examples of ``BaseIndexer`` subclasses `here <https://github.
.. versionadded:: 1.1
+One subclass of note within those examples is the ``NonFixedVariableWindowIndexer`` that allows
+rolling operations over a non-fixed offset like a ``BusinessDay``.
+
+.. ipython:: python
+
+ from pandas.api.indexers import NonFixedVariableWindowIndexer
+ df = pd.DataFrame(range(10), index=pd.date_range('2020', periods=10))
+ offset = pd.offsets.BDay(1)
+ indexer = NonFixedVariableWindowIndexer(index=df.index, offset=offset)
+ df
+ df.rolling(indexer).sum()
+
For some problems knowledge of the future is available for analysis. For example, this occurs when
each data point is a full time series read from an experiment, and the task is to extract underlying
conditions. In these cases it can be useful to perform forward-looking rolling window computations.
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index c5eb2febe8ae9..8a9cf75e34213 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -673,6 +673,7 @@ Other API changes
- ``loc`` lookups with an object-dtype :class:`Index` and an integer key will now raise ``KeyError`` instead of ``TypeError`` when key is missing (:issue:`31905`)
- Using a :func:`pandas.api.indexers.BaseIndexer` with ``count``, ``min``, ``max``, ``median``, ``skew``, ``cov``, ``corr`` will now return correct results for any monotonic :func:`pandas.api.indexers.BaseIndexer` descendant (:issue:`32865`)
- Added a :func:`pandas.api.indexers.FixedForwardWindowIndexer` class to support forward-looking windows during ``rolling`` operations.
+- Added a :func:`pandas.api.indexers.NonFixedVariableWindowIndexer` class to support ``rolling`` operations with non-fixed offsets (:issue:`34994`)
- Added :class:`pandas.errors.InvalidIndexError` (:issue:`34570`).
- :meth:`DataFrame.swaplevels` now raises a ``TypeError`` if the axis is not a :class:`MultiIndex`.
Previously an ``AttributeError`` was raised (:issue:`31126`)
diff --git a/pandas/api/indexers/__init__.py b/pandas/api/indexers/__init__.py
index 0b36b53675e23..89a9d8bfafdf1 100644
--- a/pandas/api/indexers/__init__.py
+++ b/pandas/api/indexers/__init__.py
@@ -3,6 +3,15 @@
"""
from pandas.core.indexers import check_array_indexer
-from pandas.core.window.indexers import BaseIndexer, FixedForwardWindowIndexer
+from pandas.core.window.indexers import (
+ BaseIndexer,
+ FixedForwardWindowIndexer,
+ NonFixedVariableWindowIndexer,
+)
-__all__ = ["check_array_indexer", "BaseIndexer", "FixedForwardWindowIndexer"]
+__all__ = [
+ "check_array_indexer",
+ "BaseIndexer",
+ "FixedForwardWindowIndexer",
+ "NonFixedVariableWindowIndexer",
+]
| xref https://github.com/pandas-dev/pandas/pull/34947#issuecomment-649638742
| https://api.github.com/repos/pandas-dev/pandas/pulls/34994 | 2020-06-25T17:20:35Z | 2020-06-29T23:15:46Z | 2020-06-29T23:15:46Z | 2020-06-30T05:12:50Z |
DOC/TST: DataFrame constructor with a list of DataFrames | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index c5eb2febe8ae9..460b2ba4a537d 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -695,6 +695,7 @@ Other API changes
- :func: `merge` now checks ``suffixes`` parameter type to be ``tuple`` and raises ``TypeError``, whereas before a ``list`` or ``set`` were accepted and that the ``set`` could produce unexpected results (:issue:`33740`)
- :class:`Period` no longer accepts tuples for the ``freq`` argument (:issue:`34658`)
- :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` now raises ValueError if ``limit_direction`` is 'forward' or 'both' and ``method`` is 'backfill' or 'bfill' or ``limit_direction`` is 'backward' or 'both' and ``method`` is 'pad' or 'ffill' (:issue:`34746`)
+- The :class:`DataFrame` constructor no longer accepts a list of ``DataFrame`` objects. Because of changes to NumPy, ``DataFrame`` objects are now consistently treated as 2D objects, so a list of ``DataFrames`` is considered 3D, and no longer acceptible for the ``DataFrame`` constructor (:issue:`32289`).
Increased minimum versions for dependencies
diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py
index d49f1f154a2c1..4b9db810dead0 100644
--- a/pandas/core/internals/construction.py
+++ b/pandas/core/internals/construction.py
@@ -321,7 +321,7 @@ def convert(v):
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
- raise ValueError("Must pass 2-d input")
+ raise ValueError(f"Must pass 2-d input. shape={values.shape}")
return values
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 02a871666c78d..dba243f1a339a 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -11,7 +11,7 @@
import pytz
from pandas.compat import PY37, is_platform_little_endian
-from pandas.compat.numpy import _is_numpy_dev
+from pandas.compat.numpy import _np_version_under1p19
from pandas.core.dtypes.common import is_integer_dtype
@@ -147,14 +147,20 @@ def test_constructor_dtype_list_data(self):
assert df.loc[1, 0] is None
assert df.loc[0, 1] == "2"
- @pytest.mark.xfail(_is_numpy_dev, reason="Interprets list of frame as 3D")
- def test_constructor_list_frames(self):
- # see gh-3243
- result = DataFrame([DataFrame()])
- assert result.shape == (1, 0)
+ @pytest.mark.skipif(_np_version_under1p19, reason="NumPy change.")
+ def test_constructor_list_of_2d_raises(self):
+ # https://github.com/pandas-dev/pandas/issues/32289
+ a = pd.DataFrame()
+ b = np.empty((0, 0))
+ with pytest.raises(ValueError, match=r"shape=\(1, 0, 0\)"):
+ pd.DataFrame([a])
- result = DataFrame([DataFrame(dict(A=np.arange(5)))])
- assert isinstance(result.iloc[0, 0], DataFrame)
+ with pytest.raises(ValueError, match=r"shape=\(1, 0, 0\)"):
+ pd.DataFrame([b])
+
+ a = pd.DataFrame({"A": [1, 2]})
+ with pytest.raises(ValueError, match=r"shape=\(2, 2, 1\)"):
+ pd.DataFrame([a, a])
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
@@ -507,22 +513,6 @@ def test_constructor_error_msgs(self):
with pytest.raises(ValueError, match=msg):
DataFrame({"a": False, "b": True})
- @pytest.mark.xfail(_is_numpy_dev, reason="Interprets embedded frame as 3D")
- def test_constructor_with_embedded_frames(self):
-
- # embedded data frames
- df1 = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})
- df2 = DataFrame([df1, df1 + 10])
-
- df2.dtypes
- str(df2)
-
- result = df2.loc[0, 0]
- tm.assert_frame_equal(result, df1)
-
- result = df2.loc[1, 0]
- tm.assert_frame_equal(result, df1 + 10)
-
def test_constructor_subclass_dict(self, float_frame, dict_subclass):
# Test for passing dict subclass to constructor
data = {
| Closes #32289 by removing the failing tests and asserting that we raise For NumPy>=1.19. See https://github.com/pandas-dev/pandas/issues/32289#issuecomment-649492024 for details, but the tldr is that DataFrames are now treated identically to 2D ndarrays.
One thing I wasn't sure of: where to document this. IMO this is too minor of an edge case to warrant anything in the docstring / user guide. | https://api.github.com/repos/pandas-dev/pandas/pulls/34991 | 2020-06-25T14:22:35Z | 2020-06-30T11:54:58Z | 2020-06-30T11:54:57Z | 2020-06-30T11:55:02Z |
TYP: remove inappropriate use of cast | diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 3a96a9ba8ad69..22cdd8e235e0b 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -588,7 +588,7 @@ def __init__(
elif isinstance(col_space, (int, str)):
self.col_space = {"": col_space}
self.col_space.update({column: col_space for column in self.frame.columns})
- elif isinstance(col_space, dict):
+ elif isinstance(col_space, Mapping):
for column in col_space.keys():
if column not in self.frame.columns and column != "":
raise ValueError(
@@ -596,7 +596,6 @@ def __init__(
)
self.col_space = col_space
else:
- col_space = cast(Sequence, col_space)
if len(frame.columns) != len(col_space):
raise ValueError(
f"Col_space length({len(col_space)}) should match "
| the cast was silencing `error: Argument 2 to "zip" has incompatible type "Union[Sequence[Union[str, int]], Mapping[Optional[Hashable], Union[str, int]]]"; expected
"Iterable[Union[str, int]]"`
This error is because col_space is defined as `Union[str, int, Sequence[Union[str, int]], Mapping[Label, Union[str, int]]]` and the `elif isinstance(col_space, dict):` would not catch non-dict mappings, which would go though the else.
so the else is non-dict mappings and sequences and the cast to sequence is not safe.
```
>>> df = pd.DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
>>>
>>> print(df.to_string(col_space={"b": 20}))
a b c
0 0.382920 0.057121 0.862742
1 0.579339 0.391014 0.907678
2 0.340584 0.889387 0.922690
>>>
>>> from collections import UserDict
>>>
>>> print(df.to_string(col_space=UserDict(b=20)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\simon\pandas\pandas\core\frame.py", line 838, in to_string
formatter = fmt.DataFrameFormatter(
File "C:\Users\simon\pandas\pandas\io\formats\format.py", line 601, in __init__
raise ValueError(
ValueError: Col_space length(1) should match DataFrame number of columns(3)
>>>
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/34990 | 2020-06-25T14:18:12Z | 2020-06-25T17:38:44Z | 2020-06-25T17:38:44Z | 2020-06-25T18:40:03Z |
DOC: typo in release notes | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 0d2254e401103..10dac7e2863f9 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -314,8 +314,8 @@ Other enhancements
result in object dtype but preserve the integer dtype (:issue:`33607`, :issue:`34339`).
- :meth:`~pandas.io.gbq.read_gbq` now allows to disable progress bar (:issue:`33360`).
- :meth:`~pandas.io.gbq.read_gbq` now supports the ``max_results`` kwarg from ``pandas-gbq`` (:issue:`34639`).
-- :meth:`Dataframe.cov` and :meth:`Series.cov` now support a new parameter ddof to support delta degrees of freedom as in the corresponding numpy methods (:issue:`34611`).
-- :meth:`DataFrame.to_html` and :meth:`DataFrame.to_string`'s ``col_space`` parameter now accepts a list of dict to change only some specific columns' width (:issue:`28917`).
+- :meth:`DataFrame.cov` and :meth:`Series.cov` now support a new parameter ddof to support delta degrees of freedom as in the corresponding numpy methods (:issue:`34611`).
+- :meth:`DataFrame.to_html` and :meth:`DataFrame.to_string`'s ``col_space`` parameter now accepts a list or dict to change only some specific columns' width (:issue:`28917`).
- :meth:`DataFrame.to_excel` can now also write OpenOffice spreadsheet (.ods) files (:issue:`27222`)
.. ---------------------------------------------------------------------------
| https://api.github.com/repos/pandas-dev/pandas/pulls/34989 | 2020-06-25T13:10:34Z | 2020-06-25T14:39:07Z | 2020-06-25T14:39:07Z | 2020-06-25T14:41:35Z | |
BUG: Cannot create third-party ExtensionArrays for datetime types (xfail) | diff --git a/pandas/tests/extension/arrow/test_timestamp.py b/pandas/tests/extension/arrow/test_timestamp.py
new file mode 100644
index 0000000000000..29bd3713e9552
--- /dev/null
+++ b/pandas/tests/extension/arrow/test_timestamp.py
@@ -0,0 +1,54 @@
+import datetime
+from typing import Type
+
+import pytest
+
+import pandas as pd
+from pandas.api.extensions import ExtensionDtype, register_extension_dtype
+
+pytest.importorskip("pyarrow", minversion="0.13.0")
+
+import pyarrow as pa # isort:skip
+
+from .arrays import ArrowExtensionArray # isort:skip
+
+
+@register_extension_dtype
+class ArrowTimestampUSDtype(ExtensionDtype):
+
+ type = datetime.datetime
+ kind = "M"
+ name = "arrow_timestamp_us"
+ na_value = pa.NULL
+
+ @classmethod
+ def construct_array_type(cls) -> Type["ArrowTimestampUSArray"]:
+ """
+ Return the array type associated with this dtype.
+
+ Returns
+ -------
+ type
+ """
+ return ArrowTimestampUSArray
+
+
+class ArrowTimestampUSArray(ArrowExtensionArray):
+ def __init__(self, values):
+ if not isinstance(values, pa.ChunkedArray):
+ raise ValueError
+
+ assert values.type == pa.timestamp("us")
+ self._data = values
+ self._dtype = ArrowTimestampUSDtype()
+
+
+def test_constructor_extensionblock():
+ # GH 34986
+ pd.DataFrame(
+ {
+ "timestamp": ArrowTimestampUSArray.from_scalars(
+ [None, datetime.datetime(2010, 9, 8, 7, 6, 5, 4)]
+ )
+ }
+ )
| - [x] closes #34986
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/34987 | 2020-06-25T11:05:58Z | 2021-01-14T15:41:33Z | 2021-01-14T15:41:32Z | 2021-01-14T18:59:14Z |
ENH: concat of nullable int + bool preserves int dtype | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index cee41f248fc60..75809be9fa647 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -321,7 +321,7 @@ Other enhancements
- :meth:`DataFrame.hist`, :meth:`Series.hist`, :meth:`core.groupby.DataFrameGroupBy.hist`, and :meth:`core.groupby.SeriesGroupBy.hist` have gained the ``legend`` argument. Set to True to show a legend in the histogram. (:issue:`6279`)
- :func:`concat` and :meth:`~DataFrame.append` now preserve extension dtypes, for example
combining a nullable integer column with a numpy integer column will no longer
- result in object dtype but preserve the integer dtype (:issue:`33607`, :issue:`34339`).
+ result in object dtype but preserve the integer dtype (:issue:`33607`, :issue:`34339`, :issue:`34095`).
- :meth:`~pandas.io.gbq.read_gbq` now allows to disable progress bar (:issue:`33360`).
- :meth:`~pandas.io.gbq.read_gbq` now supports the ``max_results`` kwarg from ``pandas-gbq`` (:issue:`34639`).
- :meth:`DataFrame.cov` and :meth:`Series.cov` now support a new parameter ddof to support delta degrees of freedom as in the corresponding numpy methods (:issue:`34611`).
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index df43b5d6115ba..7be7ef3637ee5 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -92,10 +92,13 @@ def construct_array_type(cls) -> Type["IntegerArray"]:
return IntegerArray
def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
- # for now only handle other integer types
+ # we only handle nullable EA dtypes and numeric numpy dtypes
if not all(
- isinstance(t, _IntegerDtype)
- or (isinstance(t, np.dtype) and np.issubdtype(t, np.integer))
+ isinstance(t, BaseMaskedDtype)
+ or (
+ isinstance(t, np.dtype)
+ and (np.issubdtype(t, np.number) or np.issubdtype(t, np.bool_))
+ )
for t in dtypes
):
return None
diff --git a/pandas/tests/arrays/integer/test_concat.py b/pandas/tests/arrays/integer/test_concat.py
index 3ace35700bd3e..fc24709deb82c 100644
--- a/pandas/tests/arrays/integer/test_concat.py
+++ b/pandas/tests/arrays/integer/test_concat.py
@@ -1,3 +1,4 @@
+import numpy as np
import pytest
import pandas as pd
@@ -15,12 +16,52 @@
(["Int32", "UInt32"], "Int64"),
# this still gives object (awaiting float extension dtype)
(["Int64", "UInt64"], "object"),
+ (["Int64", "boolean"], "Int64"),
+ (["UInt8", "boolean"], "UInt8"),
],
)
def test_concat_series(to_concat_dtypes, result_dtype):
- result = pd.concat([pd.Series([1, 2, pd.NA], dtype=t) for t in to_concat_dtypes])
- expected = pd.concat([pd.Series([1, 2, pd.NA], dtype=object)] * 2).astype(
+ result = pd.concat([pd.Series([0, 1, pd.NA], dtype=t) for t in to_concat_dtypes])
+ expected = pd.concat([pd.Series([0, 1, pd.NA], dtype=object)] * 2).astype(
result_dtype
)
tm.assert_series_equal(result, expected)
+
+ # order doesn't matter for result
+ result = pd.concat(
+ [pd.Series([0, 1, pd.NA], dtype=t) for t in to_concat_dtypes[::-1]]
+ )
+ expected = pd.concat([pd.Series([0, 1, pd.NA], dtype=object)] * 2).astype(
+ result_dtype
+ )
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "to_concat_dtypes, result_dtype",
+ [
+ (["Int64", "int64"], "Int64"),
+ (["UInt64", "uint64"], "UInt64"),
+ (["Int8", "int8"], "Int8"),
+ (["Int8", "int16"], "Int16"),
+ (["UInt8", "int8"], "Int16"),
+ (["Int32", "uint32"], "Int64"),
+ # this still gives object (awaiting float extension dtype)
+ (["Int64", "uint64"], "object"),
+ (["Int64", "bool"], "Int64"),
+ (["UInt8", "bool"], "UInt8"),
+ ],
+)
+def test_concat_series_with_numpy(to_concat_dtypes, result_dtype):
+
+ s1 = pd.Series([0, 1, pd.NA], dtype=to_concat_dtypes[0])
+ s2 = pd.Series(np.array([0, 1], dtype=to_concat_dtypes[1]))
+ result = pd.concat([s1, s2], ignore_index=True)
+ expected = pd.Series([0, 1, pd.NA, 0, 1], dtype=object).astype(result_dtype)
+ tm.assert_series_equal(result, expected)
+
+ # order doesn't matter for result
+ result = pd.concat([s2, s1], ignore_index=True)
+ expected = pd.Series([0, 1, 0, 1, pd.NA], dtype=object).astype(result_dtype)
+ tm.assert_series_equal(result, expected)
| Closes #34095 | https://api.github.com/repos/pandas-dev/pandas/pulls/34985 | 2020-06-25T07:47:18Z | 2020-07-08T16:15:56Z | 2020-07-08T16:15:55Z | 2020-07-08T16:20:00Z |
BUG: HDFStore unable to create colindex w/o error thrown | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 70c45acec9f35..22a69c1ddfb1a 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -1040,6 +1040,7 @@ I/O
- Bug in :meth:`read_excel` for ODS files removes 0.0 values (:issue:`27222`)
- Bug in :meth:`ujson.encode` was raising an `OverflowError` with numbers larger than sys.maxsize (:issue: `34395`)
- Bug in :meth:`HDFStore.append_to_multiple` was raising a ``ValueError`` when the min_itemsize parameter is set (:issue:`11238`)
+- Bug in :meth:`~HDFStore.create_table` now raises an error when `column` argument was not specified in `data_columns` on input (:issue:`28156`)
- :meth:`read_json` now could read line-delimited json file from a file url while `lines` and `chunksize` are set.
Plotting
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 0e5d7b007bd89..981b380f8b5e9 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -3569,7 +3569,6 @@ def create_index(self, columns=None, optlevel=None, kind: Optional[str] = None):
for c in columns:
v = getattr(table.cols, c, None)
if v is not None:
-
# remove the index if the kind/optlevel have changed
if v.is_indexed:
index = v.index
@@ -3597,6 +3596,13 @@ def create_index(self, columns=None, optlevel=None, kind: Optional[str] = None):
"data_columns when initializing the table."
)
v.create_index(**kw)
+ elif c in self.non_index_axes[0][1]:
+ # GH 28156
+ raise AttributeError(
+ f"column {c} is not a data_column.\n"
+ f"In order to read column {c} you must reload the dataframe \n"
+ f"into HDFStore and include {c} with the data_columns argument."
+ )
def _read_axes(
self, where, start: Optional[int] = None, stop: Optional[int] = None
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index c69992471fc9b..df014171be817 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -1727,6 +1727,37 @@ def col(t, column):
with pytest.raises(TypeError):
store.create_table_index("f2")
+ def test_create_table_index_data_columns_argument(self, setup_path):
+ # GH 28156
+
+ with ensure_clean_store(setup_path) as store:
+
+ with catch_warnings(record=True):
+
+ def col(t, column):
+ return getattr(store.get_storer(t).table.cols, column)
+
+ # data columns
+ df = tm.makeTimeDataFrame()
+ df["string"] = "foo"
+ df["string2"] = "bar"
+ store.append("f", df, data_columns=["string"])
+ assert col("f", "index").is_indexed is True
+ assert col("f", "string").is_indexed is True
+
+ msg = "'Cols' object has no attribute 'string2'"
+ with pytest.raises(AttributeError, match=msg):
+ col("f", "string2").is_indexed
+
+ # try to index a col which isn't a data_column
+ msg = (
+ f"column string2 is not a data_column.\n"
+ f"In order to read column string2 you must reload the dataframe \n"
+ f"into HDFStore and include string2 with the data_columns argument."
+ )
+ with pytest.raises(AttributeError, match=msg):
+ store.create_table_index("f", columns=["string2"])
+
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
| - [x] closes #28156
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I found this discussion of the `data_columns` argument to `pd.HDFStore.create_table_index` really useful:
https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns | https://api.github.com/repos/pandas-dev/pandas/pulls/34983 | 2020-06-25T03:36:26Z | 2020-06-30T13:01:24Z | 2020-06-30T13:01:24Z | 2020-06-30T17:46:22Z |
REF: simplify advance/move/set_length in libreduction | diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index 99c6f8bde5dd8..58de682c56d55 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -205,8 +205,7 @@ cdef class _BaseGrouper:
cdef inline object _apply_to_group(self,
object cached_typ, object cached_ityp,
- Slider islider, Slider vslider,
- Py_ssize_t group_size, bint initialized):
+ bint initialized):
"""
Call self.f on our new group, then update to the next group.
"""
@@ -222,9 +221,6 @@ cdef class _BaseGrouper:
initialized = True
_check_result_array(res, len(self.dummy_arr))
- islider.advance(group_size)
- vslider.advance(group_size)
-
return res, initialized
@@ -269,7 +265,7 @@ cdef class SeriesBinGrouper(_BaseGrouper):
cdef:
ndarray arr, result
ndarray[int64_t] counts
- Py_ssize_t i, n, group_size
+ Py_ssize_t i, n, group_size, start, end
object res
bint initialized = 0
Slider vslider, islider
@@ -293,19 +289,21 @@ cdef class SeriesBinGrouper(_BaseGrouper):
result = np.empty(self.ngroups, dtype='O')
+ start = 0
try:
for i in range(self.ngroups):
group_size = counts[i]
+ end = start + group_size
- islider.set_length(group_size)
- vslider.set_length(group_size)
+ islider.move(start, end)
+ vslider.move(start, end)
cached_typ, cached_ityp = self._update_cached_objs(
cached_typ, cached_ityp, islider, vslider)
res, initialized = self._apply_to_group(cached_typ, cached_ityp,
- islider, vslider,
- group_size, initialized)
+ initialized)
+ start += group_size
result[i] = res
@@ -361,7 +359,7 @@ cdef class SeriesGrouper(_BaseGrouper):
# Define result to avoid UnboundLocalError
ndarray arr, result = None
ndarray[int64_t] labels, counts
- Py_ssize_t i, n, group_size, lab
+ Py_ssize_t i, n, group_size, lab, start, end
object res
bint initialized = 0
Slider vslider, islider
@@ -377,6 +375,7 @@ cdef class SeriesGrouper(_BaseGrouper):
result = np.empty(self.ngroups, dtype='O')
+ start = 0
try:
for i in range(n):
group_size += 1
@@ -385,20 +384,21 @@ cdef class SeriesGrouper(_BaseGrouper):
if i == n - 1 or lab != labels[i + 1]:
if lab == -1:
- islider.advance(group_size)
- vslider.advance(group_size)
+ start += group_size
group_size = 0
continue
- islider.set_length(group_size)
- vslider.set_length(group_size)
+ end = start + group_size
+ islider.move(start, end)
+ vslider.move(start, end)
cached_typ, cached_ityp = self._update_cached_objs(
cached_typ, cached_ityp, islider, vslider)
res, initialized = self._apply_to_group(cached_typ, cached_ityp,
- islider, vslider,
- group_size, initialized)
+ initialized)
+
+ start += group_size
result[lab] = res
counts[lab] = group_size
@@ -458,9 +458,6 @@ cdef class Slider:
self.buf.data = self.values.data
self.buf.strides[0] = self.stride
- cdef advance(self, Py_ssize_t k):
- self.buf.data = <char*>self.buf.data + self.stride * k
-
cdef move(self, int start, int end):
"""
For slicing
@@ -468,9 +465,6 @@ cdef class Slider:
self.buf.data = self.values.data + self.stride * start
self.buf.shape[0] = end - start
- cdef set_length(self, Py_ssize_t length):
- self.buf.shape[0] = length
-
cdef reset(self):
self.buf.shape[0] = self.orig_len
| Make it so that we set `buf.data` in fewer places. I think from here its pretty straightforward to avoid setting buf.data at all.
cc @WillAyd. | https://api.github.com/repos/pandas-dev/pandas/pulls/34982 | 2020-06-24T23:55:37Z | 2020-06-25T14:40:55Z | 2020-06-25T14:40:55Z | 2020-06-25T15:26:51Z |
ERR: Fix to_timedelta error message | diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 1c3e69e21aa18..2862e62e3d522 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -1151,7 +1151,7 @@ class Timedelta(_Timedelta):
if unit in {'Y', 'y', 'M'}:
raise ValueError(
- "Units 'M' and 'Y' are no longer supported, as they do not "
+ "Units 'M', 'Y', and 'y' are no longer supported, as they do not "
"represent unambiguous timedelta values durations."
)
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index f6661c6b50dfb..dccc8369c5366 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -136,7 +136,7 @@ def __new__(
if unit in {"Y", "y", "M"}:
raise ValueError(
- "Units 'M' and 'Y' are no longer supported, as they do not "
+ "Units 'M', 'Y', and 'y' are no longer supported, as they do not "
"represent unambiguous timedelta values durations."
)
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index a643c312ec358..e457a8819f27a 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -94,7 +94,7 @@ def to_timedelta(arg, unit=None, errors="raise"):
if unit in {"Y", "y", "M"}:
raise ValueError(
- "Units 'M' and 'Y' are no longer supported, as they do not "
+ "Units 'M', 'Y', and 'y' are no longer supported, as they do not "
"represent unambiguous timedelta values durations."
)
diff --git a/pandas/tests/indexes/timedeltas/test_constructors.py b/pandas/tests/indexes/timedeltas/test_constructors.py
index acc68dfe7301f..41e4e220c999c 100644
--- a/pandas/tests/indexes/timedeltas/test_constructors.py
+++ b/pandas/tests/indexes/timedeltas/test_constructors.py
@@ -12,7 +12,7 @@
class TestTimedeltaIndex:
@pytest.mark.parametrize("unit", ["Y", "y", "M"])
def test_unit_m_y_raises(self, unit):
- msg = "Units 'M' and 'Y' are no longer supported"
+ msg = "Units 'M', 'Y', and 'y' are no longer supported"
with pytest.raises(ValueError, match=msg):
TimedeltaIndex([1, 3, 7], unit)
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index 38e77321418d1..a01921bd6c4c2 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -265,7 +265,7 @@ def test_unit_parser(self, units, np_unit, wrapper):
@pytest.mark.parametrize("unit", ["Y", "y", "M"])
def test_unit_m_y_raises(self, unit):
- msg = "Units 'M' and 'Y' are no longer supported"
+ msg = "Units 'M', 'Y', and 'y' are no longer supported"
with pytest.raises(ValueError, match=msg):
Timedelta(10, unit)
| Error messages were missing a case (unit of "y") | https://api.github.com/repos/pandas-dev/pandas/pulls/34981 | 2020-06-24T23:08:03Z | 2020-06-26T20:23:05Z | 2020-06-26T20:23:05Z | 2020-06-26T22:12:09Z |
DOC: Add notes about M and Y to to_timedelata documentation. (#34968) | diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index a643c312ec358..c34fe8d298cb9 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -25,7 +25,10 @@ def to_timedelta(arg, unit=None, errors="raise"):
Parameters
----------
arg : str, timedelta, list-like or Series
- The data to be converted to timedelta.
+ The data to be converted to timedelta. The character M by itself,
+ e.g. '1M', is treated as minute, not month. The characters Y and y
+ are treated as the mean length of the Gregorian calendar year -
+ 365.2425 days or 365 days 5 hours 49 minutes 12 seconds.
unit : str, optional
Denotes the unit of the arg for numeric `arg`. Defaults to ``"ns"``.
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Not sure it closes these issues, but using "M" in the arg argument of to_timedelta caused confusion in #34968 and #27285. #33094 is somewhat related. I came across another issue I can't find now where there was confusion about "Y" returning a days with times delta | https://api.github.com/repos/pandas-dev/pandas/pulls/34979 | 2020-06-24T22:31:56Z | 2020-09-26T11:31:16Z | 2020-09-26T11:31:16Z | 2020-09-26T13:20:32Z |
PERF: optimize Block.getitem_block | diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index db452cb0f1fa4..8b4b490f49b12 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -16,6 +16,7 @@ cnp.import_array()
from pandas._libs.algos import ensure_int64
+@cython.final
cdef class BlockPlacement:
# __slots__ = '_as_slice', '_as_array', '_len'
cdef:
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 0c98a779424bd..6207785fb2975 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -8,6 +8,7 @@
from pandas._libs import NaT, algos as libalgos, lib, writers
import pandas._libs.internals as libinternals
+from pandas._libs.internals import BlockPlacement
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import ArrayLike
@@ -112,6 +113,19 @@ class Block(PandasObject):
_verify_integrity = True
_validate_ndim = True
+ @classmethod
+ def _simple_new(
+ cls, values: ArrayLike, placement: BlockPlacement, ndim: int
+ ) -> "Block":
+ """
+ Fastpath constructor, does *no* validation
+ """
+ obj = object.__new__(cls)
+ obj.ndim = ndim
+ obj.values = values
+ obj._mgr_locs = placement
+ return obj
+
def __init__(self, values, placement, ndim=None):
self.ndim = self._check_ndim(values, ndim)
self.mgr_locs = placement
@@ -289,13 +303,15 @@ def getitem_block(self, slicer, new_mgr_locs=None):
if new_mgr_locs is None:
axis0_slicer = slicer[0] if isinstance(slicer, tuple) else slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
+ elif not isinstance(new_mgr_locs, BlockPlacement):
+ new_mgr_locs = BlockPlacement(new_mgr_locs)
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
- return self.make_block_same_class(new_values, new_mgr_locs)
+ return type(self)._simple_new(new_values, new_mgr_locs, self.ndim)
@property
def shape(self):
| Performance comparison is based on the the asv `groupby.Apply.time_scalar_function_single_col`, which is the one in which disabling the libreduction path has the biggest impact.
```
import pandas as pd
import numpy as np
N = 10 ** 4
labels = np.random.randint(0, 2000, size=N)
labels2 = np.random.randint(0, 3, size=N)
df = pd.DataFrame(
{
"key": labels,
"key2": labels2,
"value1": np.random.randn(N),
"value2": ["foo", "bar", "baz", "qux"] * (N // 4),
}
)
In [4]: %prun -s cumtime df.groupby("key").apply(lambda x: 1)
```
master-but-with-fast_apply-disabled:
```
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 0.081 0.081 groupby.py:822(apply)
1 0.000 0.000 0.081 0.081 groupby.py:871(_python_apply_general)
1 0.006 0.006 0.080 0.080 ops.py:157(apply)
1989 0.002 0.000 0.069 0.000 ops.py:933(__iter__)
1988 0.003 0.000 0.066 0.000 ops.py:966(_chop)
1988 0.004 0.000 0.059 0.000 managers.py:724(get_slice)
1988 0.002 0.000 0.035 0.000 managers.py:730(<listcomp>)
5964 0.009 0.000 0.033 0.000 blocks.py:283(getitem_block)
5971 0.005 0.000 0.021 0.000 blocks.py:247(make_block_same_class)
5974 0.007 0.000 0.013 0.000 blocks.py:115(__init__)
1988 0.003 0.000 0.011 0.000 base.py:4064(__getitem__)
1990 0.003 0.000 0.008 0.000 managers.py:120(__init__)
1988 0.002 0.000 0.007 0.000 numeric.py:105(_shallow_copy)
1992 0.002 0.000 0.007 0.000 blocks.py:2379(__init__)
1988 0.002 0.000 0.005 0.000 base.py:485(_shallow_copy)
1990 0.002 0.000 0.004 0.000 frame.py:432(__init__)
1992 0.002 0.000 0.003 0.000 base.py:450(_simple_new)
```
PR-but-with-fast_apply-disabled
```
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 0.061 0.061 groupby.py:822(apply)
1 0.000 0.000 0.061 0.061 groupby.py:871(_python_apply_general)
1 0.005 0.005 0.058 0.058 ops.py:157(apply)
1991 0.002 0.000 0.048 0.000 ops.py:933(__iter__)
1990 0.003 0.000 0.046 0.000 ops.py:966(_chop)
1990 0.004 0.000 0.039 0.000 managers.py:724(get_slice)
1990 0.002 0.000 0.017 0.000 managers.py:730(<listcomp>)
5970 0.009 0.000 0.015 0.000 blocks.py:297(getitem_block)
1990 0.002 0.000 0.011 0.000 base.py:4064(__getitem__)
1992 0.003 0.000 0.007 0.000 managers.py:120(__init__)
1990 0.002 0.000 0.007 0.000 numeric.py:105(_shallow_copy)
1990 0.002 0.000 0.005 0.000 base.py:485(_shallow_copy)
1992 0.002 0.000 0.004 0.000 frame.py:432(__init__)
5970 0.002 0.000 0.003 0.000 blocks.py:116(_simple_new)
1994 0.002 0.000 0.003 0.000 base.py:450(_simple_new)
1992 0.001 0.000 0.002 0.000 managers.py:126(<listcomp>)
22438 0.002 0.000 0.002 0.000 {built-in method builtins.isinstance}
```
master
```
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 0.009 0.009 groupby.py:822(apply)
1 0.000 0.000 0.009 0.009 groupby.py:871(_python_apply_general)
1 0.000 0.000 0.008 0.008 ops.py:157(apply)
1 0.000 0.000 0.005 0.005 ops.py:961(fast_apply)
1 0.003 0.003 0.005 0.005 {pandas._libs.reduction.apply_frame_axis0}
1994 0.001 0.000 0.002 0.000 base.py:4064(__getitem__)
1 0.000 0.000 0.001 0.001 ops.py:135(_get_splitter)
1 0.000 0.000 0.001 0.001 ops.py:268(group_info)
1 0.000 0.000 0.001 0.001 generic.py:1206(_wrap_applied_output)
1 0.000 0.000 0.001 0.001 ops.py:285(_get_compressed_codes)
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/34978 | 2020-06-24T22:20:54Z | 2020-06-25T14:42:14Z | 2020-06-25T14:42:14Z | 2020-06-25T15:25:21Z |
TYP: make the type annotations of read_csv & read_table discoverable | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 7c9fa53568f45..95084debd5144 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -1019,6 +1019,7 @@ I/O
- Bug in :meth:`~SQLDatabase.execute` was raising a ``ProgrammingError`` for some DB-API drivers when the SQL statement contained the `%` character and no parameters were present (:issue:`34211`)
- Bug in :meth:`~pandas.io.stata.StataReader` which resulted in categorical variables with difference dtypes when reading data using an iterator. (:issue:`31544`)
- :meth:`HDFStore.keys` has now an optional `include` parameter that allows the retrieval of all native HDF5 table names (:issue:`29916`)
+- `TypeError` exceptions raised by :meth:`read_csv` and :meth:`read_table` were showing as ``parser_f`` when an unexpected keyword argument was passed (:issue:`25648`)
- Bug in :meth:`read_excel` for ODS files removes 0.0 values (:issue:`27222`)
Plotting
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 62347f7110d76..c427d3a198b10 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -530,176 +530,229 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
_deprecated_args: Set[str] = set()
-def _make_parser_function(name, default_sep=","):
- def parser_f(
- filepath_or_buffer: FilePathOrBuffer,
- sep=default_sep,
- delimiter=None,
- # Column and Index Locations and Names
- header="infer",
- names=None,
- index_col=None,
- usecols=None,
- squeeze=False,
- prefix=None,
- mangle_dupe_cols=True,
- # General Parsing Configuration
- dtype=None,
- engine=None,
- converters=None,
- true_values=None,
- false_values=None,
- skipinitialspace=False,
- skiprows=None,
- skipfooter=0,
- nrows=None,
- # NA and Missing Data Handling
- na_values=None,
- keep_default_na=True,
- na_filter=True,
- verbose=False,
- skip_blank_lines=True,
- # Datetime Handling
- parse_dates=False,
- infer_datetime_format=False,
- keep_date_col=False,
- date_parser=None,
- dayfirst=False,
- cache_dates=True,
- # Iteration
- iterator=False,
- chunksize=None,
- # Quoting, Compression, and File Format
- compression="infer",
- thousands=None,
- decimal: str = ".",
- lineterminator=None,
- quotechar='"',
- quoting=csv.QUOTE_MINIMAL,
- doublequote=True,
- escapechar=None,
- comment=None,
- encoding=None,
- dialect=None,
- # Error Handling
- error_bad_lines=True,
- warn_bad_lines=True,
- # Internal
- delim_whitespace=False,
- low_memory=_c_parser_defaults["low_memory"],
- memory_map=False,
- float_precision=None,
- ):
-
- # gh-23761
- #
- # When a dialect is passed, it overrides any of the overlapping
- # parameters passed in directly. We don't want to warn if the
- # default parameters were passed in (since it probably means
- # that the user didn't pass them in explicitly in the first place).
- #
- # "delimiter" is the annoying corner case because we alias it to
- # "sep" before doing comparison to the dialect values later on.
- # Thus, we need a flag to indicate that we need to "override"
- # the comparison to dialect values by checking if default values
- # for BOTH "delimiter" and "sep" were provided.
- if dialect is not None:
- sep_override = delimiter is None and sep == default_sep
- kwds = dict(sep_override=sep_override)
- else:
- kwds = dict()
-
- # Alias sep -> delimiter.
- if delimiter is None:
- delimiter = sep
-
- if delim_whitespace and delimiter != default_sep:
- raise ValueError(
- "Specified a delimiter with both sep and "
- "delim_whitespace=True; you can only specify one."
- )
+@Appender(
+ _doc_read_csv_and_table.format(
+ func_name="read_csv",
+ summary="Read a comma-separated values (csv) file into DataFrame.",
+ _default_sep="','",
+ )
+)
+def read_csv(
+ filepath_or_buffer: FilePathOrBuffer,
+ sep=",",
+ delimiter=None,
+ # Column and Index Locations and Names
+ header="infer",
+ names=None,
+ index_col=None,
+ usecols=None,
+ squeeze=False,
+ prefix=None,
+ mangle_dupe_cols=True,
+ # General Parsing Configuration
+ dtype=None,
+ engine=None,
+ converters=None,
+ true_values=None,
+ false_values=None,
+ skipinitialspace=False,
+ skiprows=None,
+ skipfooter=0,
+ nrows=None,
+ # NA and Missing Data Handling
+ na_values=None,
+ keep_default_na=True,
+ na_filter=True,
+ verbose=False,
+ skip_blank_lines=True,
+ # Datetime Handling
+ parse_dates=False,
+ infer_datetime_format=False,
+ keep_date_col=False,
+ date_parser=None,
+ dayfirst=False,
+ cache_dates=True,
+ # Iteration
+ iterator=False,
+ chunksize=None,
+ # Quoting, Compression, and File Format
+ compression="infer",
+ thousands=None,
+ decimal: str = ".",
+ lineterminator=None,
+ quotechar='"',
+ quoting=csv.QUOTE_MINIMAL,
+ doublequote=True,
+ escapechar=None,
+ comment=None,
+ encoding=None,
+ dialect=None,
+ # Error Handling
+ error_bad_lines=True,
+ warn_bad_lines=True,
+ # Internal
+ delim_whitespace=False,
+ low_memory=_c_parser_defaults["low_memory"],
+ memory_map=False,
+ float_precision=None,
+):
+ # gh-23761
+ #
+ # When a dialect is passed, it overrides any of the overlapping
+ # parameters passed in directly. We don't want to warn if the
+ # default parameters were passed in (since it probably means
+ # that the user didn't pass them in explicitly in the first place).
+ #
+ # "delimiter" is the annoying corner case because we alias it to
+ # "sep" before doing comparison to the dialect values later on.
+ # Thus, we need a flag to indicate that we need to "override"
+ # the comparison to dialect values by checking if default values
+ # for BOTH "delimiter" and "sep" were provided.
+ default_sep = ","
+
+ if dialect is not None:
+ sep_override = delimiter is None and sep == default_sep
+ kwds = dict(sep_override=sep_override)
+ else:
+ kwds = dict()
- if engine is not None:
- engine_specified = True
- else:
- engine = "c"
- engine_specified = False
+ # Alias sep -> delimiter.
+ if delimiter is None:
+ delimiter = sep
- kwds.update(
- delimiter=delimiter,
- engine=engine,
- dialect=dialect,
- compression=compression,
- engine_specified=engine_specified,
- doublequote=doublequote,
- escapechar=escapechar,
- quotechar=quotechar,
- quoting=quoting,
- skipinitialspace=skipinitialspace,
- lineterminator=lineterminator,
- header=header,
- index_col=index_col,
- names=names,
- prefix=prefix,
- skiprows=skiprows,
- skipfooter=skipfooter,
- na_values=na_values,
- true_values=true_values,
- false_values=false_values,
- keep_default_na=keep_default_na,
- thousands=thousands,
- comment=comment,
- decimal=decimal,
- parse_dates=parse_dates,
- keep_date_col=keep_date_col,
- dayfirst=dayfirst,
- date_parser=date_parser,
- cache_dates=cache_dates,
- nrows=nrows,
- iterator=iterator,
- chunksize=chunksize,
- converters=converters,
- dtype=dtype,
- usecols=usecols,
- verbose=verbose,
- encoding=encoding,
- squeeze=squeeze,
- memory_map=memory_map,
- float_precision=float_precision,
- na_filter=na_filter,
- delim_whitespace=delim_whitespace,
- warn_bad_lines=warn_bad_lines,
- error_bad_lines=error_bad_lines,
- low_memory=low_memory,
- mangle_dupe_cols=mangle_dupe_cols,
- infer_datetime_format=infer_datetime_format,
- skip_blank_lines=skip_blank_lines,
+ if delim_whitespace and delimiter != default_sep:
+ raise ValueError(
+ "Specified a delimiter with both sep and "
+ "delim_whitespace=True; you can only specify one."
)
- return _read(filepath_or_buffer, kwds)
-
- parser_f.__name__ = name
-
- return parser_f
+ if engine is not None:
+ engine_specified = True
+ else:
+ engine = "c"
+ engine_specified = False
+
+ kwds.update(
+ delimiter=delimiter,
+ engine=engine,
+ dialect=dialect,
+ compression=compression,
+ engine_specified=engine_specified,
+ doublequote=doublequote,
+ escapechar=escapechar,
+ quotechar=quotechar,
+ quoting=quoting,
+ skipinitialspace=skipinitialspace,
+ lineterminator=lineterminator,
+ header=header,
+ index_col=index_col,
+ names=names,
+ prefix=prefix,
+ skiprows=skiprows,
+ skipfooter=skipfooter,
+ na_values=na_values,
+ true_values=true_values,
+ false_values=false_values,
+ keep_default_na=keep_default_na,
+ thousands=thousands,
+ comment=comment,
+ decimal=decimal,
+ parse_dates=parse_dates,
+ keep_date_col=keep_date_col,
+ dayfirst=dayfirst,
+ date_parser=date_parser,
+ cache_dates=cache_dates,
+ nrows=nrows,
+ iterator=iterator,
+ chunksize=chunksize,
+ converters=converters,
+ dtype=dtype,
+ usecols=usecols,
+ verbose=verbose,
+ encoding=encoding,
+ squeeze=squeeze,
+ memory_map=memory_map,
+ float_precision=float_precision,
+ na_filter=na_filter,
+ delim_whitespace=delim_whitespace,
+ warn_bad_lines=warn_bad_lines,
+ error_bad_lines=error_bad_lines,
+ low_memory=low_memory,
+ mangle_dupe_cols=mangle_dupe_cols,
+ infer_datetime_format=infer_datetime_format,
+ skip_blank_lines=skip_blank_lines,
+ )
+ return _read(filepath_or_buffer, kwds)
-read_csv = _make_parser_function("read_csv", default_sep=",")
-read_csv = Appender(
- _doc_read_csv_and_table.format(
- func_name="read_csv",
- summary="Read a comma-separated values (csv) file into DataFrame.",
- _default_sep="','",
- )
-)(read_csv)
-read_table = _make_parser_function("read_table", default_sep="\t")
-read_table = Appender(
+@Appender(
_doc_read_csv_and_table.format(
func_name="read_table",
summary="Read general delimited file into DataFrame.",
_default_sep=r"'\\t' (tab-stop)",
)
-)(read_table)
+)
+def read_table(
+ filepath_or_buffer: FilePathOrBuffer,
+ sep="\t",
+ delimiter=None,
+ # Column and Index Locations and Names
+ header="infer",
+ names=None,
+ index_col=None,
+ usecols=None,
+ squeeze=False,
+ prefix=None,
+ mangle_dupe_cols=True,
+ # General Parsing Configuration
+ dtype=None,
+ engine=None,
+ converters=None,
+ true_values=None,
+ false_values=None,
+ skipinitialspace=False,
+ skiprows=None,
+ skipfooter=0,
+ nrows=None,
+ # NA and Missing Data Handling
+ na_values=None,
+ keep_default_na=True,
+ na_filter=True,
+ verbose=False,
+ skip_blank_lines=True,
+ # Datetime Handling
+ parse_dates=False,
+ infer_datetime_format=False,
+ keep_date_col=False,
+ date_parser=None,
+ dayfirst=False,
+ cache_dates=True,
+ # Iteration
+ iterator=False,
+ chunksize=None,
+ # Quoting, Compression, and File Format
+ compression="infer",
+ thousands=None,
+ decimal: str = ".",
+ lineterminator=None,
+ quotechar='"',
+ quoting=csv.QUOTE_MINIMAL,
+ doublequote=True,
+ escapechar=None,
+ comment=None,
+ encoding=None,
+ dialect=None,
+ # Error Handling
+ error_bad_lines=True,
+ warn_bad_lines=True,
+ # Internal
+ delim_whitespace=False,
+ low_memory=_c_parser_defaults["low_memory"],
+ memory_map=False,
+ float_precision=None,
+):
+ return read_csv(**locals())
def read_fwf(
diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py
index e6e868689b060..12e73bae40eac 100644
--- a/pandas/tests/io/parser/test_common.py
+++ b/pandas/tests/io/parser/test_common.py
@@ -5,6 +5,7 @@
import codecs
import csv
from datetime import datetime
+from inspect import signature
from io import StringIO
import os
import platform
@@ -2071,6 +2072,39 @@ def test_read_csv_raises_on_header_prefix(all_parsers):
parser.read_csv(s, header=0, prefix="_X")
+def test_unexpected_keyword_parameter_exception(all_parsers):
+ # GH-34976
+ parser = all_parsers
+
+ msg = "{}\\(\\) got an unexpected keyword argument 'foo'"
+ with pytest.raises(TypeError, match=msg.format("read_csv")):
+ parser.read_csv("foo.csv", foo=1)
+ with pytest.raises(TypeError, match=msg.format("read_table")):
+ parser.read_table("foo.tsv", foo=1)
+
+
+def test_read_table_same_signature_as_read_csv(all_parsers):
+ # GH-34976
+ parser = all_parsers
+
+ table_sign = signature(parser.read_table)
+ csv_sign = signature(parser.read_csv)
+
+ assert table_sign.parameters.keys() == csv_sign.parameters.keys()
+ assert table_sign.return_annotation == csv_sign.return_annotation
+
+ for key, csv_param in csv_sign.parameters.items():
+ table_param = table_sign.parameters[key]
+ if key == "sep":
+ assert csv_param.default == ","
+ assert table_param.default == "\t"
+ assert table_param.annotation == csv_param.annotation
+ assert table_param.kind == csv_param.kind
+ continue
+ else:
+ assert table_param == csv_param
+
+
def test_read_table_equivalency_to_read_csv(all_parsers):
# see gh-21948
# As of 0.25.0, read_table is undeprecated
| - [x] closes #25648
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
In master the type of ``read_csv`` and ``read_table`` is 'Any'. This PR makes the functions signatures discoverable by mypy. Also fixes the error message when wrong kwarg is passed. | https://api.github.com/repos/pandas-dev/pandas/pulls/34976 | 2020-06-24T18:42:37Z | 2020-06-25T17:38:09Z | 2020-06-25T17:38:09Z | 2020-06-25T17:43:19Z |
REF: add Tick and BaseOffset to tslibs namespace | diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py
index 6f173a4542bb0..76e356370de70 100644
--- a/pandas/_libs/tslibs/__init__.py
+++ b/pandas/_libs/tslibs/__init__.py
@@ -16,13 +16,15 @@
"Timestamp",
"tz_convert_single",
"to_offset",
+ "Tick",
+ "BaseOffset",
]
from . import dtypes
from .conversion import localize_pydatetime
from .nattype import NaT, NaTType, iNaT, is_null_datetimelike, nat_strings
from .np_datetime import OutOfBoundsDatetime
-from .offsets import to_offset
+from .offsets import BaseOffset, Tick, to_offset
from .period import IncompatibleFrequency, Period
from .resolution import Resolution
from .timedeltas import Timedelta, delta_to_nanoseconds, ints_to_pytimedelta
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index 8a2626f9a7e68..0484de3fa165d 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -9,9 +9,9 @@
from typing import TYPE_CHECKING, Optional
import warnings
-from pandas import Index
+from pandas._libs.tslibs import BaseOffset
-from pandas.tseries.offsets import DateOffset
+from pandas import Index
if TYPE_CHECKING:
from pandas import Series, DataFrame
@@ -42,7 +42,7 @@ def load_reduce(self):
return
except TypeError:
pass
- elif args and issubclass(args[0], DateOffset):
+ elif args and issubclass(args[0], BaseOffset):
# TypeError: object.__new__(Day) is not safe, use Day.__new__()
cls = args[0]
stack[-1] = cls.__new__(*args)
diff --git a/pandas/core/arrays/_ranges.py b/pandas/core/arrays/_ranges.py
index 3b090ca458d88..14b442bf71080 100644
--- a/pandas/core/arrays/_ranges.py
+++ b/pandas/core/arrays/_ranges.py
@@ -7,16 +7,14 @@
import numpy as np
-from pandas._libs.tslibs import OutOfBoundsDatetime, Timedelta, Timestamp
-
-from pandas.tseries.offsets import DateOffset
+from pandas._libs.tslibs import BaseOffset, OutOfBoundsDatetime, Timedelta, Timestamp
def generate_regular_range(
start: Union[Timestamp, Timedelta],
end: Union[Timestamp, Timedelta],
periods: int,
- freq: DateOffset,
+ freq: BaseOffset,
):
"""
Generate a range of dates or timestamps with the spans between dates
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index 1fea6ca1b8a3d..a306268cd8ede 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -7,10 +7,12 @@
from pandas._libs import algos, lib
from pandas._libs.tslibs import (
+ BaseOffset,
NaT,
NaTType,
Period,
Resolution,
+ Tick,
Timestamp,
delta_to_nanoseconds,
iNaT,
@@ -62,7 +64,6 @@
from pandas.core.ops.invalid import invalid_comparison, make_invalid_op
from pandas.tseries import frequencies
-from pandas.tseries.offsets import DateOffset, Tick
DTScalarOrNaT = Union[DatetimeLikeScalar, NaTType]
@@ -421,7 +422,7 @@ def _with_freq(self, freq):
if freq is None:
# Always valid
pass
- elif len(self) == 0 and isinstance(freq, DateOffset):
+ elif len(self) == 0 and isinstance(freq, BaseOffset):
# Always valid. In the TimedeltaArray case, we assume this
# is a Tick offset.
pass
@@ -1398,7 +1399,7 @@ def __add__(self, other):
result = self._add_nat()
elif isinstance(other, (Tick, timedelta, np.timedelta64)):
result = self._add_timedeltalike_scalar(other)
- elif isinstance(other, DateOffset):
+ elif isinstance(other, BaseOffset):
# specifically _not_ a Tick
result = self._add_offset(other)
elif isinstance(other, (datetime, np.datetime64)):
@@ -1454,7 +1455,7 @@ def __sub__(self, other):
result = self._sub_nat()
elif isinstance(other, (Tick, timedelta, np.timedelta64)):
result = self._add_timedeltalike_scalar(-other)
- elif isinstance(other, DateOffset):
+ elif isinstance(other, BaseOffset):
# specifically _not_ a Tick
result = self._add_offset(-other)
elif isinstance(other, (datetime, np.datetime64)):
@@ -1778,7 +1779,7 @@ def maybe_infer_freq(freq):
Whether we should inherit the freq of passed data.
"""
freq_infer = False
- if not isinstance(freq, DateOffset):
+ if not isinstance(freq, BaseOffset):
# if a passed freq is None, don't infer automatically
if freq != "infer":
freq = to_offset(freq)
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 7902dd0410910..4b4df3445be4e 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -5,6 +5,7 @@
import numpy as np
from pandas._libs.tslibs import (
+ BaseOffset,
NaT,
NaTType,
Timedelta,
@@ -48,8 +49,6 @@
from pandas.core.arrays import datetimelike as dtl
import pandas.core.common as com
-from pandas.tseries.offsets import DateOffset
-
def _field_accessor(name: str, docstring=None):
def f(self):
@@ -280,7 +279,7 @@ def dtype(self) -> PeriodDtype:
# error: Read-only property cannot override read-write property [misc]
@property # type: ignore
- def freq(self) -> DateOffset:
+ def freq(self) -> BaseOffset:
"""
Return the frequency object for this PeriodArray.
"""
@@ -656,7 +655,7 @@ def _addsub_int_array(
res_values[self._isnan] = iNaT
return type(self)(res_values, freq=self.freq)
- def _add_offset(self, other: DateOffset):
+ def _add_offset(self, other: BaseOffset):
assert not isinstance(other, Tick)
if other.base != self.freq.base:
@@ -784,7 +783,7 @@ def raise_on_incompatible(left, right):
# GH#24283 error message format depends on whether right is scalar
if isinstance(right, (np.ndarray, ABCTimedeltaArray)) or right is None:
other_freq = None
- elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period, DateOffset)):
+ elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period, BaseOffset)):
other_freq = right.freqstr
else:
other_freq = delta_to_tick(Timedelta(right)).freqstr
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index f33b569b3d1f7..a378423df788b 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -4,7 +4,7 @@
import numpy as np
from pandas._libs import lib, tslibs
-from pandas._libs.tslibs import NaT, Period, Timedelta, Timestamp, iNaT, to_offset
+from pandas._libs.tslibs import NaT, Period, Tick, Timedelta, Timestamp, iNaT, to_offset
from pandas._libs.tslibs.conversion import precision_from_unit
from pandas._libs.tslibs.fields import get_timedelta_field
from pandas._libs.tslibs.timedeltas import array_to_timedelta64, parse_timedelta_unit
@@ -35,8 +35,6 @@
from pandas.core.construction import extract_array
from pandas.core.ops.common import unpack_zerodim_and_defer
-from pandas.tseries.offsets import Tick
-
def _field_accessor(name, alias, docstring=None):
def f(self):
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 61361c3331d5e..eda1ba844b5ac 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -31,7 +31,7 @@
from pandas._config import config
from pandas._libs import lib
-from pandas._libs.tslibs import Timestamp, to_offset
+from pandas._libs.tslibs import Tick, Timestamp, to_offset
from pandas._typing import (
Axis,
FilePathOrBuffer,
@@ -101,7 +101,6 @@
from pandas.io.formats import format as fmt
from pandas.io.formats.format import DataFrameFormatter, format_percentiles
from pandas.io.formats.printing import pprint_thing
-from pandas.tseries.offsets import Tick
if TYPE_CHECKING:
from pandas.core.resample import Resampler
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index ca6eb45e22c69..49b8ec3276e37 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -7,7 +7,7 @@
import numpy as np
from pandas._libs import NaT, Timedelta, iNaT, join as libjoin, lib
-from pandas._libs.tslibs import Resolution, timezones
+from pandas._libs.tslibs import BaseOffset, Resolution, Tick, timezones
from pandas._libs.tslibs.parsing import DateParseError
from pandas._typing import Label
from pandas.compat.numpy import function as nv
@@ -44,8 +44,6 @@
from pandas.core.sorting import ensure_key_mapped
from pandas.core.tools.timedeltas import to_timedelta
-from pandas.tseries.offsets import DateOffset, Tick
-
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_T = TypeVar("_T", bound="DatetimeIndexOpsMixin")
@@ -91,7 +89,7 @@ class DatetimeIndexOpsMixin(ExtensionIndex):
"""
_data: Union[DatetimeArray, TimedeltaArray, PeriodArray]
- freq: Optional[DateOffset]
+ freq: Optional[BaseOffset]
freqstr: Optional[str]
_resolution_obj: Resolution
_bool_ops: List[str] = []
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 3be2bcd4888cb..f7a7b382b853f 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -9,7 +9,7 @@
from pandas._libs import lib
from pandas._libs.interval import Interval, IntervalMixin, IntervalTree
-from pandas._libs.tslibs import Timedelta, Timestamp, to_offset
+from pandas._libs.tslibs import BaseOffset, Timedelta, Timestamp, to_offset
from pandas._typing import AnyArrayLike, Label
from pandas.errors import InvalidIndexError
from pandas.util._decorators import Appender, Substitution, cache_readonly
@@ -56,8 +56,6 @@
from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range
from pandas.core.ops import get_op_result_name
-from pandas.tseries.offsets import DateOffset
-
_VALID_CLOSED = {"left", "right", "both", "neither"}
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
@@ -1161,8 +1159,8 @@ def _is_type_compatible(a, b) -> bool:
"""
Helper for interval_range to check type compat of start/end/freq.
"""
- is_ts_compat = lambda x: isinstance(x, (Timestamp, DateOffset))
- is_td_compat = lambda x: isinstance(x, (Timedelta, DateOffset))
+ is_ts_compat = lambda x: isinstance(x, (Timestamp, BaseOffset))
+ is_td_compat = lambda x: isinstance(x, (Timedelta, BaseOffset))
return (
(is_number(a) and is_number(b))
or (is_ts_compat(a) and is_ts_compat(b))
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 68c2b44b23964..03e11b652477f 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -5,7 +5,7 @@
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
-from pandas._libs.tslibs import Period, Resolution
+from pandas._libs.tslibs import BaseOffset, Period, Resolution, Tick
from pandas._libs.tslibs.parsing import DateParseError, parse_time_string
from pandas._typing import DtypeObj, Label
from pandas.errors import InvalidIndexError
@@ -43,8 +43,6 @@
from pandas.core.indexes.numeric import Int64Index
from pandas.core.ops import get_op_result_name
-from pandas.tseries.offsets import DateOffset, Tick
-
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(dict(target_klass="PeriodIndex or list of Periods"))
@@ -145,7 +143,7 @@ class PeriodIndex(DatetimeIndexOpsMixin, Int64Index):
_is_numeric_dtype = False
_data: PeriodArray
- freq: DateOffset
+ freq: BaseOffset
_engine_type = libindex.PeriodEngine
_supports_partial_string_indexing = True
@@ -287,7 +285,7 @@ def _maybe_convert_timedelta(self, other):
# _check_timedeltalike_freq_compat will raise if incompatible
delta = self._data._check_timedeltalike_freq_compat(other)
return delta
- elif isinstance(other, DateOffset):
+ elif isinstance(other, BaseOffset):
if other.base == self.freq.base:
return other.n
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 7d76f8b117b5e..8cb53ebd92214 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -10,7 +10,7 @@
import numpy as np
-from pandas._libs.tslibs import to_offset
+from pandas._libs.tslibs import BaseOffset, to_offset
import pandas._libs.window.aggregations as window_aggregations
from pandas._typing import Axis, FrameOrSeries, Scalar
from pandas.compat._optional import import_optional_dependency
@@ -55,8 +55,6 @@
)
from pandas.core.window.numba_ import generate_numba_apply_func
-from pandas.tseries.offsets import DateOffset
-
def calculate_center_offset(window) -> int:
"""
@@ -1935,7 +1933,7 @@ def validate(self):
# we allow rolling on a datetimelike index
if (self.obj.empty or self.is_datetimelike) and isinstance(
- self.window, (str, DateOffset, timedelta)
+ self.window, (str, BaseOffset, timedelta)
):
self._validate_monotonic()
diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py
index 8ffd30567b9ac..8f3571cf13cbc 100644
--- a/pandas/plotting/_matplotlib/timeseries.py
+++ b/pandas/plotting/_matplotlib/timeseries.py
@@ -5,7 +5,7 @@
import numpy as np
-from pandas._libs.tslibs import Period, to_offset
+from pandas._libs.tslibs import BaseOffset, Period, to_offset
from pandas._libs.tslibs.dtypes import FreqGroup
from pandas._typing import FrameOrSeriesUnion
@@ -22,7 +22,6 @@
TimeSeries_TimedeltaFormatter,
)
from pandas.tseries.frequencies import get_period_alias, is_subperiod, is_superperiod
-from pandas.tseries.offsets import DateOffset
if TYPE_CHECKING:
from pandas import Series, Index # noqa:F401
@@ -218,7 +217,7 @@ def _use_dynamic_x(ax, data: "FrameOrSeriesUnion") -> bool:
return True
-def _get_index_freq(index: "Index") -> Optional[DateOffset]:
+def _get_index_freq(index: "Index") -> Optional[BaseOffset]:
freq = getattr(index, "freq", None)
if freq is None:
freq = getattr(index, "inferred_freq", None)
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index fa129167a744f..201856669103a 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -6,18 +6,17 @@
import numpy as np
import pytest
-from pandas._libs.tslibs import to_offset
+from pandas._libs.tslibs import BaseOffset, to_offset
import pandas.util._test_decorators as td
from pandas import DataFrame, Index, NaT, Series, isna
import pandas._testing as tm
-from pandas.core.indexes.datetimes import bdate_range, date_range
+from pandas.core.indexes.datetimes import DatetimeIndex, bdate_range, date_range
from pandas.core.indexes.period import Period, PeriodIndex, period_range
from pandas.core.indexes.timedeltas import timedelta_range
-from pandas.core.resample import DatetimeIndex
from pandas.tests.plotting.common import TestPlotBase
-from pandas.tseries.offsets import DateOffset, WeekOfMonth
+from pandas.tseries.offsets import WeekOfMonth
@td.skip_if_no_mpl
@@ -1509,7 +1508,7 @@ def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
ax = kwargs.pop("ax", plt.gca())
if series is not None:
dfreq = series.index.freq
- if isinstance(dfreq, DateOffset):
+ if isinstance(dfreq, BaseOffset):
dfreq = dfreq.rule_code
if orig_axfreq is None:
assert ax.freq == dfreq
diff --git a/pandas/tests/tslibs/test_api.py b/pandas/tests/tslibs/test_api.py
index a119db6c68635..840a8c2fb68b1 100644
--- a/pandas/tests/tslibs/test_api.py
+++ b/pandas/tests/tslibs/test_api.py
@@ -25,6 +25,7 @@ def test_namespace():
]
api = [
+ "BaseOffset",
"NaT",
"NaTType",
"iNaT",
@@ -34,6 +35,7 @@ def test_namespace():
"Period",
"IncompatibleFrequency",
"Resolution",
+ "Tick",
"Timedelta",
"Timestamp",
"delta_to_nanoseconds",
| Update imports appropriately. | https://api.github.com/repos/pandas-dev/pandas/pulls/34963 | 2020-06-24T02:13:38Z | 2020-06-24T22:18:59Z | 2020-06-24T22:18:59Z | 2020-06-24T22:25:21Z |
REF: dont consolidate in BlockManager.equals | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 0c98a779424bd..ea9c697345ba9 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -55,12 +55,7 @@
ABCPandasArray,
ABCSeries,
)
-from pandas.core.dtypes.missing import (
- _isna_compat,
- array_equivalent,
- is_valid_nat_for_dtype,
- isna,
-)
+from pandas.core.dtypes.missing import _isna_compat, is_valid_nat_for_dtype, isna
import pandas.core.algorithms as algos
from pandas.core.array_algos.transforms import shift
@@ -1367,11 +1362,6 @@ def where_func(cond, values, other):
return result_blocks
- def equals(self, other) -> bool:
- if self.dtype != other.dtype or self.shape != other.shape:
- return False
- return array_equivalent(self.values, other.values)
-
def _unstack(self, unstacker, fill_value, new_placement):
"""
Return a list of unstacked blocks of self
@@ -1865,9 +1855,6 @@ def where(
return [self.make_block_same_class(result, placement=self.mgr_locs)]
- def equals(self, other) -> bool:
- return self.values.equals(other.values)
-
def _unstack(self, unstacker, fill_value, new_placement):
# ExtensionArray-safe unstack.
# We override ObjectBlock._unstack, which unstacks directly on the
@@ -1913,12 +1900,6 @@ class NumericBlock(Block):
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
- def equals(self, other) -> bool:
- if self.dtype != other.dtype or self.shape != other.shape:
- return False
- left, right = self.values, other.values
- return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
-
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
@@ -2282,12 +2263,6 @@ def setitem(self, indexer, value):
)
return newb.setitem(indexer, value)
- def equals(self, other) -> bool:
- # override for significant performance improvement
- if self.dtype != other.dtype or self.shape != other.shape:
- return False
- return (self.values.view("i8") == other.values.view("i8")).all()
-
def quantile(self, qs, interpolation="linear", axis=0):
naive = self.values.view("M8[ns]")
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 6055a6205d286..51865ad1c3e1a 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -19,6 +19,7 @@
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
is_datetimelike_v_numeric,
+ is_dtype_equal,
is_extension_array_dtype,
is_list_like,
is_numeric_v_string_like,
@@ -27,9 +28,10 @@
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
-from pandas.core.dtypes.missing import isna
+from pandas.core.dtypes.missing import array_equivalent, isna
import pandas.core.algorithms as algos
+from pandas.core.arrays import ExtensionArray
from pandas.core.arrays.sparse import SparseDtype
from pandas.core.base import PandasObject
import pandas.core.common as com
@@ -1409,29 +1411,39 @@ def take(self, indexer, axis: int = 1, verify: bool = True, convert: bool = True
new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True
)
- def equals(self, other) -> bool:
+ def equals(self, other: "BlockManager") -> bool:
self_axes, other_axes = self.axes, other.axes
if len(self_axes) != len(other_axes):
return False
if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
return False
- self._consolidate_inplace()
- other._consolidate_inplace()
- if len(self.blocks) != len(other.blocks):
- return False
- # canonicalize block order, using a tuple combining the mgr_locs
- # then type name because there might be unconsolidated
- # blocks (say, Categorical) which can only be distinguished by
- # the iteration order
- def canonicalize(block):
- return (block.mgr_locs.as_array.tolist(), block.dtype.name)
-
- self_blocks = sorted(self.blocks, key=canonicalize)
- other_blocks = sorted(other.blocks, key=canonicalize)
- return all(
- block.equals(oblock) for block, oblock in zip(self_blocks, other_blocks)
- )
+ if self.ndim == 1:
+ # For SingleBlockManager (i.e.Series)
+ if other.ndim != 1:
+ return False
+ left = self.blocks[0].values
+ right = other.blocks[0].values
+ if not is_dtype_equal(left.dtype, right.dtype):
+ return False
+ elif isinstance(left, ExtensionArray):
+ return left.equals(right)
+ else:
+ return array_equivalent(left, right)
+
+ for i in range(len(self.items)):
+ # Check column-wise, return False if any column doesnt match
+ left = self.iget_values(i)
+ right = other.iget_values(i)
+ if not is_dtype_equal(left.dtype, right.dtype):
+ return False
+ elif isinstance(left, ExtensionArray):
+ if not left.equals(right):
+ return False
+ else:
+ if not array_equivalent(left, right):
+ return False
+ return True
def unstack(self, unstacker, fill_value) -> "BlockManager":
"""
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index 5fd44d7cd74a9..06ccdd2484a2a 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -377,7 +377,7 @@ def test_copy(self, mgr):
for blk, cp_blk in zip(mgr.blocks, cp.blocks):
# view assertion
- assert cp_blk.equals(blk)
+ tm.assert_equal(cp_blk.values, blk.values)
if isinstance(blk.values, np.ndarray):
assert cp_blk.values.base is blk.values.base
else:
@@ -389,7 +389,7 @@ def test_copy(self, mgr):
# copy assertion we either have a None for a base or in case of
# some blocks it is an array (e.g. datetimetz), but was copied
- assert cp_blk.equals(blk)
+ tm.assert_equal(cp_blk.values, blk.values)
if not isinstance(cp_blk.values, np.ndarray):
assert cp_blk.values._data.base is not blk.values._data.base
else:
| Avoid side-effects.
Side-note: with EA.equals recently added to the interface, should we update array_equivalent to use it? ATM array_equivalent starts by casting both inputs to ndarray. | https://api.github.com/repos/pandas-dev/pandas/pulls/34962 | 2020-06-24T01:57:30Z | 2020-07-06T00:12:47Z | 2020-07-06T00:12:47Z | 2020-07-08T16:43:31Z |
DOC: misc sphinx directive fixes | diff --git a/pandas/core/common.py b/pandas/core/common.py
index af24f8d707abd..b4f726f4e59a9 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -404,7 +404,7 @@ def random_state(state=None):
If receives `None`, returns np.random.
If receives anything else, raises an informative ValueError.
- ..versionchanged:: 1.1.0
+ .. versionchanged:: 1.1.0
array-like and BitGenerator (for NumPy>=1.18) object now passed to
np.random.RandomState() as seed
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 55b30100175ae..69f309ee6decc 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -8212,7 +8212,7 @@ def cov(
Delta degrees of freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
- versionadded:: 1.1.0
+ .. versionadded:: 1.1.0
Returns
-------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index fa92f702f07f5..61361c3331d5e 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4809,7 +4809,7 @@ def sample(
random number generator
If np.random.RandomState, use as numpy RandomState object.
- ..versionchanged:: 1.1.0
+ .. versionchanged:: 1.1.0
array-like and BitGenerator (for NumPy>=1.17) object now passed to
np.random.RandomState() as seed
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 1aeb6271056c6..3674537c0137e 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2342,7 +2342,7 @@ def cov(
Delta degrees of freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
- versionadded:: 1.1.0
+ .. versionadded:: 1.1.0
Returns
-------
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 4f5b7b2d7a888..4eb68367560b6 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -67,7 +67,7 @@ def hist_series(
legend : bool, default False
Whether to show the legend.
- ..versionadded:: 1.1.0
+ .. versionadded:: 1.1.0
**kwargs
To be passed to the actual plotting function.
@@ -179,7 +179,7 @@ def hist_frame(
legend : bool, default False
Whether to show the legend.
- ..versionadded:: 1.1.0
+ .. versionadded:: 1.1.0
**kwargs
All other plotting keyword arguments to be passed to
| https://api.github.com/repos/pandas-dev/pandas/pulls/34960 | 2020-06-23T18:38:57Z | 2020-06-23T20:08:43Z | 2020-06-23T20:08:43Z | 2020-06-24T13:05:24Z | |
ENH: GH34946 Check type of names argument to `read_csv`, `read_table`… | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 60aa1759958f6..9d151c78b2048 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -1121,6 +1121,7 @@ Other
- :class:`IntegerArray` now implements the ``sum`` operation (:issue:`33172`)
- Bug in :class:`Tick` comparisons raising ``TypeError`` when comparing against timedelta-like objects (:issue:`34088`)
- Bug in :class:`Tick` multiplication raising ``TypeError`` when multiplying by a float (:issue:`34486`)
+- Passing a `set` as `names` argument to :func:`pandas.read_csv`, :func:`pandas.read_table`, or :func:`pandas.read_fwf` will raise ``ValueError: Names should be an ordered collection.`` (:issue:`34946`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 679cf4c2d8929..62347f7110d76 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -397,7 +397,8 @@ def _validate_integer(name, val, min_val=0):
def _validate_names(names):
"""
- Raise ValueError if the `names` parameter contains duplicates.
+ Raise ValueError if the `names` parameter contains duplicates or has an
+ invalid data type.
Parameters
----------
@@ -407,11 +408,13 @@ def _validate_names(names):
Raises
------
ValueError
- If names are not unique.
+ If names are not unique or are not ordered (e.g. set).
"""
if names is not None:
if len(names) != len(set(names)):
raise ValueError("Duplicate names are not allowed.")
+ if not is_list_like(names, allow_sets=False):
+ raise ValueError("Names should be an ordered collection.")
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py
index e38fcf1380220..e6e868689b060 100644
--- a/pandas/tests/io/parser/test_common.py
+++ b/pandas/tests/io/parser/test_common.py
@@ -2135,3 +2135,13 @@ def test_no_header_two_extra_columns(all_parsers):
parser = all_parsers
df = parser.read_csv(stream, header=None, names=column_names, index_col=False)
tm.assert_frame_equal(df, ref)
+
+
+def test_read_csv_names_not_accepting_sets(all_parsers):
+ # GH 34946
+ data = """\
+ 1,2,3
+ 4,5,6\n"""
+ parser = all_parsers
+ with pytest.raises(ValueError, match="Names should be an ordered collection."):
+ parser.read_csv(StringIO(data), names=set("QAZ"))
|
- [x] closes #34946
- [x] tests passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
- ~~One test failed (TestTSPlot.test_ts_plot_with_tz) but it's not related to what I changed.~~
- ~~Does it need a whatsnew entry?~~ | https://api.github.com/repos/pandas-dev/pandas/pulls/34956 | 2020-06-23T16:37:24Z | 2020-06-24T13:25:58Z | 2020-06-24T13:25:57Z | 2020-06-24T16:13:43Z |
DOC: Clarify in pandas.DataFrame.drop that a copy is returned when inplace = False. | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index d12ebeafe8510..990d295a0b6fe 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4006,7 +4006,8 @@ def drop(
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
- If True, do operation inplace and return None.
+ If False, return a copy. Otherwise, do operation
+ inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
| Clarify in `pandas.DataFrame.drop` that a copy is returned when `inplace = False`.
- [x] closes #33451
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/34955 | 2020-06-23T16:06:59Z | 2020-06-23T19:44:20Z | 2020-06-23T19:44:19Z | 2020-06-24T08:10:19Z |
Fix Issue 34748 - read in datetime as MultiIndex for column headers | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 46e0d2a1164e1..ace930f9ef3ae 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -1051,6 +1051,7 @@ I/O
- Bug in :meth:`~HDFStore.create_table` now raises an error when `column` argument was not specified in `data_columns` on input (:issue:`28156`)
- :meth:`read_json` now could read line-delimited json file from a file url while `lines` and `chunksize` are set.
- Bug in :meth:`DataFrame.to_sql` when reading DataFrames with ``-np.inf`` entries with MySQL now has a more explicit ``ValueError`` (:issue:`34431`)
+- Bug in "meth"`read_excel` where datetime values are used in the header in a `MultiIndex` (:issue:`34748`)
Plotting
^^^^^^^^
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index c427d3a198b10..d4f346f8c1087 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1614,7 +1614,7 @@ def extract(r):
# Clean the column names (if we have an index_col).
if len(ic):
col_names = [
- r[0] if (len(r[0]) and r[0] not in self.unnamed_cols) else None
+ r[0] if ((r[0] is not None) and r[0] not in self.unnamed_cols) else None
for r in header
]
else:
diff --git a/pandas/tests/io/data/excel/test_datetime_mi.ods b/pandas/tests/io/data/excel/test_datetime_mi.ods
new file mode 100644
index 0000000000000..c37c35060c650
Binary files /dev/null and b/pandas/tests/io/data/excel/test_datetime_mi.ods differ
diff --git a/pandas/tests/io/data/excel/test_datetime_mi.xls b/pandas/tests/io/data/excel/test_datetime_mi.xls
new file mode 100644
index 0000000000000..aeade05855919
Binary files /dev/null and b/pandas/tests/io/data/excel/test_datetime_mi.xls differ
diff --git a/pandas/tests/io/data/excel/test_datetime_mi.xlsb b/pandas/tests/io/data/excel/test_datetime_mi.xlsb
new file mode 100644
index 0000000000000..0984c020a4c54
Binary files /dev/null and b/pandas/tests/io/data/excel/test_datetime_mi.xlsb differ
diff --git a/pandas/tests/io/data/excel/test_datetime_mi.xlsm b/pandas/tests/io/data/excel/test_datetime_mi.xlsm
new file mode 100644
index 0000000000000..55fb88912afb9
Binary files /dev/null and b/pandas/tests/io/data/excel/test_datetime_mi.xlsm differ
diff --git a/pandas/tests/io/data/excel/test_datetime_mi.xlsx b/pandas/tests/io/data/excel/test_datetime_mi.xlsx
new file mode 100644
index 0000000000000..0ffee0a8b79a3
Binary files /dev/null and b/pandas/tests/io/data/excel/test_datetime_mi.xlsx differ
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index 955db982f8300..ddc631532194a 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -1143,3 +1143,22 @@ def test_header_with_index_col(self, engine, filename):
filename, sheet_name="Sheet1", index_col=0, header=[0, 1]
)
tm.assert_frame_equal(expected, result)
+
+ def test_read_datetime_multiindex(self, engine, read_ext):
+ # GH 34748
+ if engine == "pyxlsb":
+ pytest.xfail("Sheets containing datetimes not supported by pyxlsb")
+
+ f = "test_datetime_mi" + read_ext
+ with pd.ExcelFile(f) as excel:
+ actual = pd.read_excel(excel, header=[0, 1], index_col=0, engine=engine)
+ expected_column_index = pd.MultiIndex.from_tuples(
+ [(pd.to_datetime("02/29/2020"), pd.to_datetime("03/01/2020"))],
+ names=[
+ pd.to_datetime("02/29/2020").to_pydatetime(),
+ pd.to_datetime("03/01/2020").to_pydatetime(),
+ ],
+ )
+ expected = pd.DataFrame([], columns=expected_column_index)
+
+ tm.assert_frame_equal(expected, actual)
| - [x] closes #34748
- [x] tests added / passed
- `pandas/tests/io/excel/test_readers.py`, method `test_read_datetime_multiindex`
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/34954 | 2020-06-23T16:04:14Z | 2020-07-08T22:03:48Z | 2020-07-08T22:03:47Z | 2023-02-13T20:50:50Z |
PERF: Fixed cut regression, improve Categorical | diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py
index 107b9b9edcd5d..a0b24342091ec 100644
--- a/asv_bench/benchmarks/categoricals.py
+++ b/asv_bench/benchmarks/categoricals.py
@@ -34,6 +34,7 @@ def setup(self):
self.values_all_int8 = np.ones(N, "int8")
self.categorical = pd.Categorical(self.values, self.categories)
self.series = pd.Series(self.categorical)
+ self.intervals = pd.interval_range(0, 1, periods=N // 10)
def time_regular(self):
pd.Categorical(self.values, self.categories)
@@ -44,6 +45,9 @@ def time_fastpath(self):
def time_datetimes(self):
pd.Categorical(self.datetimes)
+ def time_interval(self):
+ pd.Categorical(self.datetimes, categories=self.datetimes)
+
def time_datetimes_with_nat(self):
pd.Categorical(self.datetimes_with_nat)
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 22b83425b58c2..a190f01101ac3 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -805,6 +805,8 @@ Performance improvements
- Performance improvement for groupby methods :meth:`~pandas.core.groupby.groupby.Groupby.first`
and :meth:`~pandas.core.groupby.groupby.Groupby.last` (:issue:`34178`)
- Performance improvement in :func:`factorize` for nullable (integer and boolean) dtypes (:issue:`33064`).
+- Performance improvement when constructing :class:`Categorical` objects (:issue:`33921`)
+- Fixed performance regression in :func:`pandas.qcut` and :func:`pandas.cut` (:issue:`33921`)
- Performance improvement in reductions (sum, prod, min, max) for nullable (integer and boolean) dtypes (:issue:`30982`, :issue:`33261`, :issue:`33442`).
- Performance improvement in arithmetic operations between two :class:`DataFrame` objects (:issue:`32779`)
- Performance improvement in :class:`pandas.core.groupby.RollingGroupby` (:issue:`34052`)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 80fe1ac7ce619..3d469ec28b9c4 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -2611,6 +2611,11 @@ def _get_codes_for_values(values, categories):
values = ensure_object(values)
categories = ensure_object(categories)
+ if isinstance(categories, ABCIndexClass):
+ return coerce_indexer_dtype(categories.get_indexer_for(values), categories)
+
+ # Only hit here when we've already coerced to object dtypee.
+
hash_klass, vals = _get_data_algo(values)
_, cats = _get_data_algo(categories)
t = hash_klass(len(cats))
diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py
index 9be741274c15a..ca942c9288898 100644
--- a/pandas/tests/arrays/categorical/test_constructors.py
+++ b/pandas/tests/arrays/categorical/test_constructors.py
@@ -643,3 +643,45 @@ def test_constructor_string_and_tuples(self):
c = pd.Categorical(np.array(["c", ("a", "b"), ("b", "a"), "c"], dtype=object))
expected_index = pd.Index([("a", "b"), ("b", "a"), "c"])
assert c.categories.equals(expected_index)
+
+ def test_interval(self):
+ idx = pd.interval_range(0, 10, periods=10)
+ cat = pd.Categorical(idx, categories=idx)
+ expected_codes = np.arange(10, dtype="int8")
+ tm.assert_numpy_array_equal(cat.codes, expected_codes)
+ tm.assert_index_equal(cat.categories, idx)
+
+ # infer categories
+ cat = pd.Categorical(idx)
+ tm.assert_numpy_array_equal(cat.codes, expected_codes)
+ tm.assert_index_equal(cat.categories, idx)
+
+ # list values
+ cat = pd.Categorical(list(idx))
+ tm.assert_numpy_array_equal(cat.codes, expected_codes)
+ tm.assert_index_equal(cat.categories, idx)
+
+ # list values, categories
+ cat = pd.Categorical(list(idx), categories=list(idx))
+ tm.assert_numpy_array_equal(cat.codes, expected_codes)
+ tm.assert_index_equal(cat.categories, idx)
+
+ # shuffled
+ values = idx.take([1, 2, 0])
+ cat = pd.Categorical(values, categories=idx)
+ tm.assert_numpy_array_equal(cat.codes, np.array([1, 2, 0], dtype="int8"))
+ tm.assert_index_equal(cat.categories, idx)
+
+ # extra
+ values = pd.interval_range(8, 11, periods=3)
+ cat = pd.Categorical(values, categories=idx)
+ expected_codes = np.array([8, 9, -1], dtype="int8")
+ tm.assert_numpy_array_equal(cat.codes, expected_codes)
+ tm.assert_index_equal(cat.categories, idx)
+
+ # overlapping
+ idx = pd.IntervalIndex([pd.Interval(0, 2), pd.Interval(0, 1)])
+ cat = pd.Categorical(idx, categories=idx)
+ expected_codes = np.array([0, 1], dtype="int8")
+ tm.assert_numpy_array_equal(cat.codes, expected_codes)
+ tm.assert_index_equal(cat.categories, idx)
| This has two changes to address a performance regression in
cut / qcut.
~1. Avoid an unnecessary `set` conversion in cut.~ (reverted for now)
2. Avoid a costly conversion to object in the Categorical constructor
for dtypes we don't have a hashtable for.
```python
In [2]: idx = pd.interval_range(0, 1, periods=10000)
In [3]: %timeit pd.Categorical(idx, idx)
```
```
# 1.0.4
10.4 ms ± 351 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
# master
256 ms ± 5.85 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
# HEAD
53.2 µs ± 1.26 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)
```
And for the qcut ASV
```
# 1.0.4
58.5 ms ± 3.13 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
# master
134 ms ± 9.5 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
# HEAD
53.6 ms ± 1.06 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
```
Closes https://github.com/pandas-dev/pandas/issues/33921 | https://api.github.com/repos/pandas-dev/pandas/pulls/34952 | 2020-06-23T14:43:50Z | 2020-06-24T22:37:00Z | 2020-06-24T22:37:00Z | 2020-06-25T11:26:09Z |
Correct the misprint at advanced.rst | diff --git a/doc/source/user_guide/advanced.rst b/doc/source/user_guide/advanced.rst
index d6f5c0c758b60..a0331dd632583 100644
--- a/doc/source/user_guide/advanced.rst
+++ b/doc/source/user_guide/advanced.rst
@@ -260,7 +260,9 @@ You don't have to specify all levels of the ``MultiIndex`` by passing only the
first elements of the tuple. For example, you can use "partial" indexing to
get all elements with ``bar`` in the first level as follows:
-df.loc['bar']
+.. ipython:: python
+
+ df.loc['bar']
This is a shortcut for the slightly more verbose notation ``df.loc[('bar',),]`` (equivalent
to ``df.loc['bar',]`` in this example).
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/34950 | 2020-06-23T14:19:32Z | 2020-06-23T16:02:26Z | 2020-06-23T16:02:26Z | 2020-06-23T16:02:30Z |
CLN: Removed unnecessary variable call | diff --git a/pandas/tests/series/methods/test_explode.py b/pandas/tests/series/methods/test_explode.py
index 979199e1efc62..a25cfadf12467 100644
--- a/pandas/tests/series/methods/test_explode.py
+++ b/pandas/tests/series/methods/test_explode.py
@@ -88,7 +88,6 @@ def test_typical_usecase():
columns=["var1", "var2"],
)
exploded = df.var1.str.split(",").explode()
- exploded
result = df[["var2"]].join(exploded)
expected = pd.DataFrame(
{"var2": [1, 1, 1, 2, 2, 2], "var1": list("abcdef")},
| - [x] closes #34934
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/34949 | 2020-06-23T11:37:24Z | 2020-06-23T14:00:07Z | 2020-06-23T14:00:07Z | 2020-06-23T18:03:35Z |
Pd.series.map performance | diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 89d94dc0cabd6..dbc88d0b371e8 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -207,6 +207,7 @@ Performance improvements
- Performance improvements when creating Series with dtype `str` or :class:`StringDtype` from array with many string elements (:issue:`36304`, :issue:`36317`)
- Performance improvement in :meth:`GroupBy.agg` with the ``numba`` engine (:issue:`35759`)
+- Performance improvements when creating :meth:`pd.Series.map` from a huge dictionary (:issue:`34717`)
- Performance improvement in :meth:`GroupBy.transform` with the ``numba`` engine (:issue:`36240`)
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/series.py b/pandas/core/series.py
index ef9ade5c7bb15..747aabb26ca84 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -362,15 +362,19 @@ def _init_dict(self, data, index=None, dtype=None):
# Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')]
# raises KeyError), so we iterate the entire dict, and align
if data:
- keys, values = zip(*data.items())
- values = list(values)
+ # GH:34717, issue was using zip to extract key and values from data.
+ # using generators in effects the performance.
+ # Below is the new way of extracting the keys and values
+
+ keys = tuple(data.keys())
+ values = list(data.values()) # Generating list of values- faster way
elif index is not None:
# fastpath for Series(data=None). Just use broadcasting a scalar
# instead of reindexing.
values = na_value_for_dtype(dtype)
keys = index
else:
- keys, values = [], []
+ keys, values = tuple([]), []
# Input is now list-like, so rely on "standard" construction:
| There are other places also to refactor to improve the performance, but this current change has greater impact aswell.
- [ ] closes #34717
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry `improved the performance of pd.series.map`
| https://api.github.com/repos/pandas-dev/pandas/pulls/34948 | 2020-06-23T10:06:25Z | 2020-09-13T20:28:44Z | 2020-09-13T20:28:44Z | 2020-09-13T20:31:59Z |
DOC: Demonstrate custom rolling indexer with Businessday | diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index 19fdb541a6a45..897e5d5fb0e24 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -561,7 +561,7 @@ For example, if we have the following ``DataFrame``:
df
and we want to use an expanding window where ``use_expanding`` is ``True`` otherwise a window of size
-1, we can create the following ``BaseIndexer``:
+1, we can create the following ``BaseIndexer`` subclass:
.. code-block:: ipython
@@ -593,6 +593,8 @@ and we want to use an expanding window where ``use_expanding`` is ``True`` other
3 3.0
4 10.0
+You can view other examples of ``BaseIndexer`` subclasses `here <https://github.com/pandas-dev/pandas/blob/master/pandas/core/window/indexers.py>`__
+
.. versionadded:: 1.1
For some problems knowledge of the future is available for analysis. For example, this occurs when
diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py
index f0a76dc17b411..b710a35410458 100644
--- a/pandas/core/window/indexers.py
+++ b/pandas/core/window/indexers.py
@@ -1,4 +1,5 @@
"""Indexer objects for computing start/end window bounds for rolling operations"""
+from datetime import timedelta
from typing import Dict, Optional, Tuple, Type, Union
import numpy as np
@@ -6,6 +7,8 @@
from pandas._libs.window.indexers import calculate_variable_window_bounds
from pandas.util._decorators import Appender
+from pandas.tseries.offsets import Nano
+
get_window_bounds_doc = """
Computes the bounds of a window.
@@ -104,6 +107,88 @@ def get_window_bounds(
)
+class NonFixedVariableWindowIndexer(BaseIndexer):
+ """Calculate window boundaries based on a non-fixed offset such as a BusinessDay"""
+
+ def __init__(
+ self,
+ index_array: Optional[np.ndarray] = None,
+ window_size: int = 0,
+ index=None,
+ offset=None,
+ **kwargs,
+ ):
+ super().__init__(index_array, window_size, **kwargs)
+ self.index = index
+ self.offset = offset
+
+ @Appender(get_window_bounds_doc)
+ def get_window_bounds(
+ self,
+ num_values: int = 0,
+ min_periods: Optional[int] = None,
+ center: Optional[bool] = None,
+ closed: Optional[str] = None,
+ ) -> Tuple[np.ndarray, np.ndarray]:
+
+ # if windows is variable, default is 'right', otherwise default is 'both'
+ if closed is None:
+ closed = "right" if self.index is not None else "both"
+
+ right_closed = closed in ["right", "both"]
+ left_closed = closed in ["left", "both"]
+
+ if self.index[num_values - 1] < self.index[0]:
+ index_growth_sign = -1
+ else:
+ index_growth_sign = 1
+
+ start = np.empty(num_values, dtype="int64")
+ start.fill(-1)
+ end = np.empty(num_values, dtype="int64")
+ end.fill(-1)
+
+ start[0] = 0
+
+ # right endpoint is closed
+ if right_closed:
+ end[0] = 1
+ # right endpoint is open
+ else:
+ end[0] = 0
+
+ # start is start of slice interval (including)
+ # end is end of slice interval (not including)
+ for i in range(1, num_values):
+ end_bound = self.index[i]
+ start_bound = self.index[i] - index_growth_sign * self.offset
+
+ # left endpoint is closed
+ if left_closed:
+ start_bound -= Nano(1)
+
+ # advance the start bound until we are
+ # within the constraint
+ start[i] = i
+ for j in range(start[i - 1], i):
+ if (self.index[j] - start_bound) * index_growth_sign > timedelta(0):
+ start[i] = j
+ break
+
+ # end bound is previous end
+ # or current index
+ if (self.index[end[i - 1]] - end_bound) * index_growth_sign <= timedelta(0):
+ end[i] = i + 1
+ else:
+ end[i] = end[i - 1]
+
+ # right endpoint is open
+ if not right_closed:
+ end[i] -= 1
+
+ return start, end
+
+
class ExpandingIndexer(BaseIndexer):
"""Calculate expanding window bounds, mimicking df.expanding()"""
diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py
index df58028dee862..6f64a376b6fad 100644
--- a/pandas/tests/window/test_base_indexer.py
+++ b/pandas/tests/window/test_base_indexer.py
@@ -1,10 +1,12 @@
import numpy as np
import pytest
-from pandas import DataFrame, Series
+from pandas import DataFrame, Series, date_range
import pandas._testing as tm
from pandas.api.indexers import BaseIndexer, FixedForwardWindowIndexer
-from pandas.core.window.indexers import ExpandingIndexer
+from pandas.core.window.indexers import ExpandingIndexer, NonFixedVariableWindowIndexer
+
+from pandas.tseries.offsets import BusinessDay
def test_bad_get_window_bounds_signature():
@@ -234,3 +236,20 @@ def test_rolling_forward_cov_corr(func, expected):
expected = Series(expected)
expected.name = result.name
tm.assert_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "closed,expected_data",
+ [
+ ["right", [0.0, 1.0, 2.0, 3.0, 7.0, 12.0, 6.0, 7.0, 8.0, 9.0]],
+ ["left", [0.0, 0.0, 1.0, 2.0, 5.0, 9.0, 5.0, 6.0, 7.0, 8.0]],
+ ],
+)
+def test_non_fixed_variable_window_indexer(closed, expected_data):
+ index = date_range("2020", periods=10)
+ df = DataFrame(range(10), index=index)
+ offset = BusinessDay(1)
+ indexer = NonFixedVariableWindowIndexer(index=index, offset=offset)
+ result = df.rolling(indexer, closed=closed).sum()
+ expected = DataFrame(expected_data, index=index)
+ tm.assert_frame_equal(result, expected)
| - [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/34947 | 2020-06-23T06:07:23Z | 2020-06-25T15:50:49Z | 2020-06-25T15:50:48Z | 2020-06-25T16:48:29Z |
DOC: improve explanation of con argument DataFrame.to_sql | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 1404d225eea97..e3dd3f2994f13 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2488,7 +2488,7 @@ def to_sql(
----------
name : str
Name of SQL table.
- con : sqlalchemy.engine.Engine or sqlite3.Connection
+ con : sqlalchemy.engine.(Engine or Connection) or sqlite3.Connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. Legacy support is provided for sqlite3.Connection objects. The user
is responsible for engine disposal and connection closure for the SQLAlchemy
@@ -2576,18 +2576,27 @@ def to_sql(
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]
- >>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
- >>> df1.to_sql('users', con=engine, if_exists='append')
+ An `sqlalchemy.engine.Connection` can also be passed to to `con`:
+ >>> with engine.begin() as connection:
+ ... df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
+ ... df1.to_sql('users', con=connection, if_exists='append')
+
+ This is allowed to support operations that require that the same
+ DBAPI connection is used for the entire operation.
+
+ >>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']})
+ >>> df2.to_sql('users', con=engine, if_exists='append')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
- (0, 'User 4'), (1, 'User 5')]
+ (0, 'User 4'), (1, 'User 5'), (0, 'User 6'),
+ (1, 'User 7')]
- Overwrite the table with just ``df1``.
+ Overwrite the table with just ``df2``.
- >>> df1.to_sql('users', con=engine, if_exists='replace',
+ >>> df2.to_sql('users', con=engine, if_exists='replace',
... index_label='id')
>>> engine.execute("SELECT * FROM users").fetchall()
- [(0, 'User 4'), (1, 'User 5')]
+ [(0, 'User 6'), (1, 'User 7')]
Specify the dtype (especially useful for integers with missing values).
Notice that while pandas is forced to store the data as floating point,
| - [x] closes #34824
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/34944 | 2020-06-23T03:32:56Z | 2020-06-25T05:10:27Z | 2020-06-25T05:10:27Z | 2020-06-26T17:52:11Z |
BUG: exponential moving window covariance fails for multiIndexed DataFrame | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 10dac7e2863f9..bf6f88edb2e43 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -1059,6 +1059,7 @@ Groupby/resample/rolling
- Bug in :meth:`SeriesGroupBy.agg` where any column name was accepted in the named aggregation of ``SeriesGroupBy`` previously. The behaviour now allows only ``str`` and callables else would raise ``TypeError``. (:issue:`34422`)
- Bug in :meth:`DataFrame.groupby` lost index, when one of the ``agg`` keys referenced an empty list (:issue:`32580`)
- Bug in :meth:`Rolling.apply` where ``center=True`` was ignored when ``engine='numba'`` was specified (:issue:`34784`)
+- Bug in :meth:`DataFrame.ewm.cov` was throwing ``AssertionError`` for :class:`MultiIndex` inputs (:issue:`34440`)
Reshaping
^^^^^^^^^
diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py
index 413fe648903ac..58e7841d4dde5 100644
--- a/pandas/core/window/common.py
+++ b/pandas/core/window/common.py
@@ -179,7 +179,10 @@ def dataframe_from_int_dict(data, frame_template):
result.index = MultiIndex.from_product(
arg2.columns.levels + [result_index]
)
- result = result.reorder_levels([2, 0, 1]).sort_index()
+ # GH 34440
+ num_levels = len(result.index.levels)
+ new_order = [num_levels - 1] + list(range(num_levels - 1))
+ result = result.reorder_levels(new_order).sort_index()
else:
result.index = MultiIndex.from_product(
[range(len(arg2.columns)), range(len(result_index))]
diff --git a/pandas/tests/window/test_pairwise.py b/pandas/tests/window/test_pairwise.py
index bb305e93a3cf1..e82d4b8cbf770 100644
--- a/pandas/tests/window/test_pairwise.py
+++ b/pandas/tests/window/test_pairwise.py
@@ -3,7 +3,7 @@
import numpy as np
import pytest
-from pandas import DataFrame, Series, date_range
+from pandas import DataFrame, MultiIndex, Series, date_range
import pandas._testing as tm
from pandas.core.algorithms import safe_sort
@@ -189,3 +189,28 @@ def test_corr_freq_memory_error(self):
result = s.rolling("12H").corr(s)
expected = Series([np.nan] * 5, index=date_range("2020", periods=5))
tm.assert_series_equal(result, expected)
+
+ def test_cov_mulittindex(self):
+ # GH 34440
+
+ columns = MultiIndex.from_product([list("ab"), list("xy"), list("AB")])
+ index = range(3)
+ df = DataFrame(np.arange(24).reshape(3, 8), index=index, columns=columns,)
+
+ result = df.ewm(alpha=0.1).cov()
+
+ index = MultiIndex.from_product([range(3), list("ab"), list("xy"), list("AB")])
+ columns = MultiIndex.from_product([list("ab"), list("xy"), list("AB")])
+ expected = DataFrame(
+ np.vstack(
+ (
+ np.full((8, 8), np.NaN),
+ np.full((8, 8), 32.000000),
+ np.full((8, 8), 63.881919),
+ )
+ ),
+ index=index,
+ columns=columns,
+ )
+
+ tm.assert_frame_equal(result, expected)
| - [x] closes #34440
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/34943 | 2020-06-23T00:08:38Z | 2020-06-25T23:24:27Z | 2020-06-25T23:24:26Z | 2020-06-26T17:52:56Z |
BUG: reset_index doesn't preserve dtype on empty frame with MultiIndex | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 22b83425b58c2..751909406cb37 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -934,6 +934,7 @@ Indexing
- Bug in :meth:`Series.at` when used with a :class:`MultiIndex` would raise an exception on valid inputs (:issue:`26989`)
- Bug in :meth:`DataFrame.loc` with dictionary of values changes columns with dtype of ``int`` to ``float`` (:issue:`34573`)
- Bug in :meth:`Series.loc` when used with a :class:`MultiIndex` would raise an IndexingError when accessing a None value (:issue:`34318`)
+- Bug in :meth:`DataFrame.reset_index` and :meth:`Series.reset_index` would not preserve data types on an empty :class:`DataFrame` or :class:`Series` with a :class:`MultiIndex` (:issue:`19602`)
Missing
^^^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index d12ebeafe8510..023392de54325 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4679,7 +4679,7 @@ def _maybe_casted_values(index, labels=None):
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
- values = np.empty(len(mask))
+ values = np.empty(len(mask), dtype=index.dtype)
values.fill(np.nan)
else:
values = values.take(labels)
diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py
index 6586c19af2539..79442acccb326 100644
--- a/pandas/tests/frame/methods/test_reset_index.py
+++ b/pandas/tests/frame/methods/test_reset_index.py
@@ -297,3 +297,11 @@ def test_reset_index_range(self):
index=RangeIndex(stop=2),
)
tm.assert_frame_equal(result, expected)
+
+
+def test_reset_index_dtypes_on_empty_frame_with_multiindex():
+ # GH 19602 - Preserve dtype on empty DataFrame with MultiIndex
+ idx = MultiIndex.from_product([[0, 1], [0.5, 1.0], ["a", "b"]])
+ result = DataFrame(index=idx)[:0].reset_index().dtypes
+ expected = Series({"level_0": np.int64, "level_1": np.float64, "level_2": object})
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_reset_index.py b/pandas/tests/series/methods/test_reset_index.py
index f0c4895ad7c10..a11590d42552d 100644
--- a/pandas/tests/series/methods/test_reset_index.py
+++ b/pandas/tests/series/methods/test_reset_index.py
@@ -108,3 +108,13 @@ def test_reset_index_drop_errors(self):
s = Series(range(4), index=MultiIndex.from_product([[1, 2]] * 2))
with pytest.raises(KeyError, match="not found"):
s.reset_index("wrong", drop=True)
+
+
+def test_reset_index_dtypes_on_empty_series_with_multiindex():
+ # GH 19602 - Preserve dtype on empty Series with MultiIndex
+ idx = MultiIndex.from_product([[0, 1], [0.5, 1.0], ["a", "b"]])
+ result = Series(dtype=object, index=idx)[:0].reset_index().dtypes
+ expected = Series(
+ {"level_0": np.int64, "level_1": np.float64, "level_2": object, 0: object}
+ )
+ tm.assert_series_equal(result, expected)
| - [x] closes #19602
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/34942 | 2020-06-22T23:53:56Z | 2020-06-23T22:07:01Z | 2020-06-23T22:07:01Z | 2020-07-11T16:01:50Z |
TST: disallow bare pytest raises | diff --git a/pandas/tests/frame/methods/test_assign.py b/pandas/tests/frame/methods/test_assign.py
index 63b9f031de188..0ae501d43e742 100644
--- a/pandas/tests/frame/methods/test_assign.py
+++ b/pandas/tests/frame/methods/test_assign.py
@@ -65,9 +65,11 @@ def test_assign_bad(self):
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
# non-keyword argument
- with pytest.raises(TypeError):
+ msg = r"assign\(\) takes 1 positional argument but 2 were given"
+ with pytest.raises(TypeError, match=msg):
df.assign(lambda x: x.A)
- with pytest.raises(AttributeError):
+ msg = "'DataFrame' object has no attribute 'C'"
+ with pytest.raises(AttributeError, match=msg):
df.assign(C=df.A, D=df.A + df.C)
def test_assign_dependent(self):
diff --git a/pandas/tests/frame/methods/test_at_time.py b/pandas/tests/frame/methods/test_at_time.py
index 71368f270147f..ac98d632c5dcd 100644
--- a/pandas/tests/frame/methods/test_at_time.py
+++ b/pandas/tests/frame/methods/test_at_time.py
@@ -65,7 +65,8 @@ def test_at_time_tz(self):
def test_at_time_raises(self):
# GH#20725
df = DataFrame([[1, 2, 3], [4, 5, 6]])
- with pytest.raises(TypeError): # index is not a DatetimeIndex
+ msg = "Index must be DatetimeIndex"
+ with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
df.at_time("00:00")
@pytest.mark.parametrize("axis", ["index", "columns", 0, 1])
diff --git a/pandas/tests/frame/methods/test_between_time.py b/pandas/tests/frame/methods/test_between_time.py
index b40604b4f4a16..19e802d0fa663 100644
--- a/pandas/tests/frame/methods/test_between_time.py
+++ b/pandas/tests/frame/methods/test_between_time.py
@@ -68,7 +68,8 @@ def test_between_time(self, close_open_fixture):
def test_between_time_raises(self):
# GH#20725
df = DataFrame([[1, 2, 3], [4, 5, 6]])
- with pytest.raises(TypeError): # index is not a DatetimeIndex
+ msg = "Index must be DatetimeIndex"
+ with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
df.between_time(start_time="00:00", end_time="12:00")
def test_between_time_axis(self, axis):
diff --git a/pandas/tests/frame/methods/test_first_and_last.py b/pandas/tests/frame/methods/test_first_and_last.py
index 73e4128ddebb9..2b3756969acca 100644
--- a/pandas/tests/frame/methods/test_first_and_last.py
+++ b/pandas/tests/frame/methods/test_first_and_last.py
@@ -31,7 +31,8 @@ def test_first_subset(self):
def test_first_raises(self):
# GH#20725
df = DataFrame([[1, 2, 3], [4, 5, 6]])
- with pytest.raises(TypeError): # index is not a DatetimeIndex
+ msg = "'first' only supports a DatetimeIndex index"
+ with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
df.first("1D")
def test_last_subset(self):
@@ -57,5 +58,6 @@ def test_last_subset(self):
def test_last_raises(self):
# GH20725
df = DataFrame([[1, 2, 3], [4, 5, 6]])
- with pytest.raises(TypeError): # index is not a DatetimeIndex
+ msg = "'last' only supports a DatetimeIndex index"
+ with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
df.last("1D")
diff --git a/pandas/tests/frame/methods/test_interpolate.py b/pandas/tests/frame/methods/test_interpolate.py
index efb3d719016bb..facb116646573 100644
--- a/pandas/tests/frame/methods/test_interpolate.py
+++ b/pandas/tests/frame/methods/test_interpolate.py
@@ -43,7 +43,14 @@ def test_interp_bad_method(self):
"D": list("abcd"),
}
)
- with pytest.raises(ValueError):
+ msg = (
+ r"method must be one of \['linear', 'time', 'index', 'values', "
+ r"'nearest', 'zero', 'slinear', 'quadratic', 'cubic', "
+ r"'barycentric', 'krogh', 'spline', 'polynomial', "
+ r"'from_derivatives', 'piecewise_polynomial', 'pchip', 'akima', "
+ r"'cubicspline'\]. Got 'not_a_method' instead."
+ )
+ with pytest.raises(ValueError, match=msg):
df.interpolate(method="not_a_method")
def test_interp_combo(self):
@@ -67,7 +74,11 @@ def test_interp_combo(self):
def test_interp_nan_idx(self):
df = DataFrame({"A": [1, 2, np.nan, 4], "B": [np.nan, 2, 3, 4]})
df = df.set_index("A")
- with pytest.raises(NotImplementedError):
+ msg = (
+ "Interpolation with NaNs in the index has not been implemented. "
+ "Try filling those NaNs before interpolating."
+ )
+ with pytest.raises(NotImplementedError, match=msg):
df.interpolate(method="values")
@td.skip_if_no_scipy
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index 3bcc26e85e347..3b9a724d74c7d 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -1314,7 +1314,11 @@ def test_categorical_replace_with_dict(self, replace_dict, final_data):
expected = DataFrame({"a": a, "b": b})
result = df.replace(replace_dict, 3)
tm.assert_frame_equal(result, expected)
- with pytest.raises(AssertionError):
+ msg = (
+ r"Attributes of DataFrame.iloc\[:, 0\] \(column name=\"a\"\) are "
+ "different"
+ )
+ with pytest.raises(AssertionError, match=msg):
# ensure non-inplace call does not affect original
tm.assert_frame_equal(df, expected)
df.replace(replace_dict, 3, inplace=True)
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index 89f268f8b6bc6..98a2a33822e3b 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -71,9 +71,14 @@ def test_query_numexpr(self):
result = df.eval("A+1", engine="numexpr")
tm.assert_series_equal(result, self.expected2, check_names=False)
else:
- with pytest.raises(ImportError):
+ msg = (
+ r"'numexpr' is not installed or an unsupported version. "
+ r"Cannot use engine='numexpr' for query/eval if 'numexpr' is "
+ r"not installed"
+ )
+ with pytest.raises(ImportError, match=msg):
df.query("A>0", engine="numexpr")
- with pytest.raises(ImportError):
+ with pytest.raises(ImportError, match=msg):
df.eval("A+1", engine="numexpr")
@@ -452,14 +457,16 @@ def test_date_query_with_non_date(self):
result = df.query("dates != nondate", parser=parser, engine=engine)
tm.assert_frame_equal(result, df)
+ msg = r"Invalid comparison between dtype=datetime64\[ns\] and ndarray"
for op in ["<", ">", "<=", ">="]:
- with pytest.raises(TypeError):
+ with pytest.raises(TypeError, match=msg):
df.query(f"dates {op} nondate", parser=parser, engine=engine)
def test_query_syntax_error(self):
engine, parser = self.engine, self.parser
df = DataFrame({"i": range(10), "+": range(3, 13), "r": range(4, 14)})
- with pytest.raises(SyntaxError):
+ msg = "invalid syntax"
+ with pytest.raises(SyntaxError, match=msg):
df.query("i - +", engine=engine, parser=parser)
def test_query_scope(self):
@@ -781,7 +788,8 @@ def test_date_index_query_with_NaT_duplicates(self):
df["dates3"] = date_range("1/1/2014", periods=n)
df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT
df.set_index("dates1", inplace=True, drop=True)
- with pytest.raises(NotImplementedError):
+ msg = r"'BoolOp' nodes are not implemented"
+ with pytest.raises(NotImplementedError, match=msg):
df.query("index < 20130101 < dates3", engine=engine, parser=parser)
def test_nested_scope(self):
@@ -798,7 +806,8 @@ def test_nested_scope(self):
df2 = DataFrame(np.random.randn(5, 3))
# don't have the pandas parser
- with pytest.raises(SyntaxError):
+ msg = r"The '@' prefix is only supported by the pandas parser"
+ with pytest.raises(SyntaxError, match=msg):
df.query("(@df>0) & (@df2>0)", engine=engine, parser=parser)
with pytest.raises(UndefinedVariableError, match="name 'df' is not defined"):
@@ -867,10 +876,10 @@ def test_str_query_method(self, parser, engine):
eq, ne = "==", "!="
ops = 2 * ([eq] + [ne])
+ msg = r"'(Not)?In' nodes are not implemented"
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = f"{lhs} {op} {rhs}"
- msg = r"'(Not)?In' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
df.query(
ex,
@@ -908,10 +917,11 @@ def test_str_list_query_method(self, parser, engine):
eq, ne = "==", "!="
ops = 2 * ([eq] + [ne])
+ msg = r"'(Not)?In' nodes are not implemented"
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = f"{lhs} {op} {rhs}"
- with pytest.raises(NotImplementedError):
+ with pytest.raises(NotImplementedError, match=msg):
df.query(ex, engine=engine, parser=parser)
else:
res = df.query('strings == ["a", "b"]', engine=engine, parser=parser)
@@ -946,10 +956,12 @@ def test_query_with_string_columns(self, parser, engine):
expec = df[df.a.isin(df.b) & (df.c < df.d)]
tm.assert_frame_equal(res, expec)
else:
- with pytest.raises(NotImplementedError):
+ msg = r"'(Not)?In' nodes are not implemented"
+ with pytest.raises(NotImplementedError, match=msg):
df.query("a in b", parser=parser, engine=engine)
- with pytest.raises(NotImplementedError):
+ msg = r"'BoolOp' nodes are not implemented"
+ with pytest.raises(NotImplementedError, match=msg):
df.query("a in b and c < d", parser=parser, engine=engine)
def test_object_array_eq_ne(self, parser, engine):
@@ -1186,15 +1198,18 @@ def test_missing_attribute(self, df):
df.eval("@pd.thing")
def test_failing_quote(self, df):
- with pytest.raises(SyntaxError):
+ msg = r"(Could not convert ).*( to a valid Python identifier.)"
+ with pytest.raises(SyntaxError, match=msg):
df.query("`it's` > `that's`")
def test_failing_character_outside_range(self, df):
- with pytest.raises(SyntaxError):
+ msg = r"(Could not convert ).*( to a valid Python identifier.)"
+ with pytest.raises(SyntaxError, match=msg):
df.query("`☺` > 4")
def test_failing_hashtag(self, df):
- with pytest.raises(SyntaxError):
+ msg = "Failed to parse backticks"
+ with pytest.raises(SyntaxError, match=msg):
df.query("`foo#bar` > 4")
def test_call_non_named_expression(self, df):
| xref #30999
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Changes for:
- pandas/tests/frame/methods/test_assign.py
- pandas/tests/frame/methods/test_at_time.py
- pandas/tests/frame/methods/test_between_time.py
- pandas/tests/frame/methods/test_first_and_last.py
- pandas/tests/frame/methods/test_interpolate.py
- pandas/tests/frame/methods/test_replace.py
- pandas/tests/frame/test_query_eval.py
Belongs to https://github.com/pandas-dev/pandas/issues/30999 | https://api.github.com/repos/pandas-dev/pandas/pulls/34940 | 2020-06-22T21:40:18Z | 2020-06-23T22:08:40Z | 2020-06-23T22:08:39Z | 2020-06-23T22:08:43Z |
HDFStore append_to_multiple with min_itemsize | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 0d2254e401103..6eb61f14d5629 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -1027,6 +1027,7 @@ I/O
- :meth:`HDFStore.keys` has now an optional `include` parameter that allows the retrieval of all native HDF5 table names (:issue:`29916`)
- Bug in :meth:`read_excel` for ODS files removes 0.0 values (:issue:`27222`)
- Bug in :meth:`ujson.encode` was raising an `OverflowError` with numbers larger than sys.maxsize (:issue: `34395`)
+- Bug in :meth:`HDFStore.append_to_multiple` was raising a ``ValueError`` when the min_itemsize parameter is set (:issue:`11238`)
Plotting
^^^^^^^^
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 800e9474cc0f8..0e5d7b007bd89 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1303,6 +1303,8 @@ def append_to_multiple(
valid_index = valid_index.intersection(index)
value = value.loc[valid_index]
+ min_itemsize = kwargs.pop("min_itemsize", None)
+
# append
for k, v in d.items():
dc = data_columns if k == selector else None
@@ -1310,7 +1312,12 @@ def append_to_multiple(
# compute the val
val = value.reindex(v, axis=axis)
- self.append(k, val, data_columns=dc, **kwargs)
+ filtered = (
+ {key: value for (key, value) in min_itemsize.items() if key in v}
+ if min_itemsize is not None
+ else None
+ )
+ self.append(k, val, data_columns=dc, min_itemsize=filtered, **kwargs)
def create_table_index(
self,
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 524e9f41a7731..c69992471fc9b 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -3697,6 +3697,33 @@ def test_append_to_multiple_dropna_false(self, setup_path):
assert not store.select("df1a").index.equals(store.select("df2a").index)
+ def test_append_to_multiple_min_itemsize(self, setup_path):
+ # GH 11238
+ df = pd.DataFrame(
+ {
+ "IX": np.arange(1, 21),
+ "Num": np.arange(1, 21),
+ "BigNum": np.arange(1, 21) * 88,
+ "Str": ["a" for _ in range(20)],
+ "LongStr": ["abcde" for _ in range(20)],
+ }
+ )
+ expected = df.iloc[[0]]
+
+ with ensure_clean_store(setup_path) as store:
+ store.append_to_multiple(
+ {
+ "index": ["IX"],
+ "nums": ["Num", "BigNum"],
+ "strs": ["Str", "LongStr"],
+ },
+ df.iloc[[0]],
+ "index",
+ min_itemsize={"Str": 10, "LongStr": 100, "Num": 2},
+ )
+ result = store.select_as_multiple(["index", "nums", "strs"])
+ tm.assert_frame_equal(result, expected)
+
def test_select_as_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
| - [x] closes #11238
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/34939 | 2020-06-22T21:00:12Z | 2020-06-25T22:56:46Z | 2020-06-25T22:56:45Z | 2020-06-25T23:07:14Z |
BUG: clear cache on DataFrame._is_homogeneous_dtype | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index d12ebeafe8510..d868f503b3ea0 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -611,7 +611,8 @@ def _is_homogeneous_type(self) -> bool:
if self._mgr.any_extension_types:
return len({block.dtype for block in self._mgr.blocks}) == 1
else:
- return not self._mgr.is_mixed_type
+ # Note: consolidates inplace
+ return not self._is_mixed_type
@property
def _can_fast_transpose(self) -> bool:
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index 9c415564fd99a..f3e3ef9bae5c6 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -233,6 +233,18 @@ def test_constructor_list_str_na(self, string_dtype):
def test_is_homogeneous_type(self, data, expected):
assert data._is_homogeneous_type is expected
+ def test_is_homogeneous_type_clears_cache(self):
+ ser = pd.Series([1, 2, 3])
+ df = ser.to_frame("A")
+ df["B"] = ser
+
+ assert len(df._mgr.blocks) == 2
+
+ a = df["B"] # caches lookup
+ df._is_homogeneous_type # _should_ clear cache
+ assert len(df._mgr.blocks) == 1
+ assert df["B"] is not a
+
def test_asarray_homogenous(self):
df = pd.DataFrame({"A": pd.Categorical([1, 2]), "B": pd.Categorical([1, 2])})
result = np.asarray(df)
| - [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/34937 | 2020-06-22T20:04:18Z | 2020-06-24T23:35:18Z | 2020-06-24T23:35:18Z | 2020-06-24T23:52:23Z |
REF: simplify _is_single_block/is_mixed_type | diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 38c495e1dd0f3..0c98a779424bd 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -168,10 +168,6 @@ def _holder(self):
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
- @property
- def _is_single_block(self) -> bool:
- return self.ndim == 1
-
@property
def is_view(self) -> bool:
""" return a boolean if I am possibly a view """
@@ -259,7 +255,7 @@ def make_block_same_class(self, values, placement=None, ndim=None):
def __repr__(self) -> str:
# don't want to print out all of the items here
name = type(self).__name__
- if self._is_single_block:
+ if self.ndim == 1:
result = f"{name}: {len(self)} dtype: {self.dtype}"
else:
@@ -476,8 +472,7 @@ def downcast(self, dtypes=None):
values = self.values
- # single block handling
- if self._is_single_block:
+ if self.ndim == 1:
# try to cast all non-floats here
if dtypes is None:
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index eaf59051205d6..6055a6205d286 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -220,16 +220,8 @@ def set_axis(self, axis: int, new_labels: Index) -> None:
@property
def _is_single_block(self) -> bool:
- if self.ndim == 1:
- return True
-
- if len(self.blocks) != 1:
- return False
-
- blk = self.blocks[0]
- return blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice == slice(
- 0, len(self), 1
- )
+ # Assumes we are 2D; overriden by SingleBlockManager
+ return len(self.blocks) == 1
def _rebuild_blknos_and_blklocs(self) -> None:
"""
@@ -1486,6 +1478,7 @@ class SingleBlockManager(BlockManager):
_is_consolidated = True
_known_consolidated = True
__slots__ = ()
+ _is_single_block = True
def __init__(
self,
| https://api.github.com/repos/pandas-dev/pandas/pulls/34935 | 2020-06-22T19:52:47Z | 2020-06-23T20:20:29Z | 2020-06-23T20:20:29Z | 2021-11-20T23:22:35Z | |
ENH: add ignore_index option in DataFrame.explode | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 75f406d908c73..028c6793aa9ec 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -318,6 +318,7 @@ Other enhancements
- :meth:`DataFrame.cov` and :meth:`Series.cov` now support a new parameter ddof to support delta degrees of freedom as in the corresponding numpy methods (:issue:`34611`).
- :meth:`DataFrame.to_html` and :meth:`DataFrame.to_string`'s ``col_space`` parameter now accepts a list or dict to change only some specific columns' width (:issue:`28917`).
- :meth:`DataFrame.to_excel` can now also write OpenOffice spreadsheet (.ods) files (:issue:`27222`)
+- :meth:`~Series.explode` now accepts ``ignore_index`` to reset the index, similarly to :meth:`pd.concat` or :meth:`DataFrame.sort_values` (:issue:`34932`).
.. ---------------------------------------------------------------------------
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 521d16ac0b905..39f93af1670bf 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -6939,7 +6939,9 @@ def stack(self, level=-1, dropna=True):
else:
return stack(self, level, dropna=dropna)
- def explode(self, column: Union[str, Tuple]) -> "DataFrame":
+ def explode(
+ self, column: Union[str, Tuple], ignore_index: bool = False
+ ) -> "DataFrame":
"""
Transform each element of a list-like to a row, replicating index values.
@@ -6949,6 +6951,10 @@ def explode(self, column: Union[str, Tuple]) -> "DataFrame":
----------
column : str or tuple
Column to explode.
+ ignore_index : bool, default False
+ If True, the resulting index will be labeled 0, 1, …, n - 1.
+
+ .. versionadded:: 1.1.0
Returns
-------
@@ -7005,7 +7011,10 @@ def explode(self, column: Union[str, Tuple]) -> "DataFrame":
assert df is not None # needed for mypy
result = df[column].explode()
result = df.drop([column], axis=1).join(result)
- result.index = self.index.take(result.index)
+ if ignore_index:
+ result.index = ibase.default_index(len(result))
+ else:
+ result.index = self.index.take(result.index)
result = result.reindex(columns=self.columns, copy=False)
return result
diff --git a/pandas/core/series.py b/pandas/core/series.py
index a652af5efc590..54b85afea4964 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3774,12 +3774,19 @@ def reorder_levels(self, order) -> "Series":
result.index = result.index.reorder_levels(order)
return result
- def explode(self) -> "Series":
+ def explode(self, ignore_index: bool = False) -> "Series":
"""
Transform each element of a list-like to a row.
.. versionadded:: 0.25.0
+ Parameters
+ ----------
+ ignore_index : bool, default False
+ If True, the resulting index will be labeled 0, 1, …, n - 1.
+
+ .. versionadded:: 1.1.0
+
Returns
-------
Series
@@ -3826,9 +3833,13 @@ def explode(self) -> "Series":
values, counts = reshape.explode(np.asarray(self.array))
- result = self._constructor(
- values, index=self.index.repeat(counts), name=self.name
- )
+ if ignore_index:
+ index = ibase.default_index(len(values))
+ else:
+ index = self.index.repeat(counts)
+
+ result = self._constructor(values, index=index, name=self.name)
+
return result
def unstack(self, level=-1, fill_value=None):
diff --git a/pandas/tests/frame/methods/test_explode.py b/pandas/tests/frame/methods/test_explode.py
index bad8349ec977b..2bbe8ac2d5b81 100644
--- a/pandas/tests/frame/methods/test_explode.py
+++ b/pandas/tests/frame/methods/test_explode.py
@@ -162,3 +162,13 @@ def test_duplicate_index(input_dict, input_index, expected_dict, expected_index)
result = df.explode("col1")
expected = pd.DataFrame(expected_dict, index=expected_index, dtype=object)
tm.assert_frame_equal(result, expected)
+
+
+def test_ignore_index():
+ # GH 34932
+ df = pd.DataFrame({"id": range(0, 20, 10), "values": [list("ab"), list("cd")]})
+ result = df.explode("values", ignore_index=True)
+ expected = pd.DataFrame(
+ {"id": [0, 0, 10, 10], "values": list("abcd")}, index=[0, 1, 2, 3]
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_explode.py b/pandas/tests/series/methods/test_explode.py
index a25cfadf12467..4b65e042f7b02 100644
--- a/pandas/tests/series/methods/test_explode.py
+++ b/pandas/tests/series/methods/test_explode.py
@@ -118,3 +118,11 @@ def test_duplicate_index():
result = s.explode()
expected = pd.Series([1, 2, 3, 4], index=[0, 0, 0, 0], dtype=object)
tm.assert_series_equal(result, expected)
+
+
+def test_ignore_index():
+ # GH 34932
+ s = pd.Series([[1, 2], [3, 4]])
+ result = s.explode(ignore_index=True)
+ expected = pd.Series([1, 2, 3, 4], index=[0, 1, 2, 3], dtype=object)
+ tm.assert_series_equal(result, expected)
| - [x] closes #34932
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/34933 | 2020-06-22T11:28:39Z | 2020-06-26T20:22:25Z | 2020-06-26T20:22:24Z | 2020-07-19T21:20:05Z |
BUG: fix IntegerArray astype with copy=True/False | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 5473b7c1523f3..5dff6d729479a 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -1143,6 +1143,7 @@ ExtensionArray
- Fixed bug where :meth:`StringArray.memory_usage` was not implemented (:issue:`33963`)
- Fixed bug where :meth:`DataFrameGroupBy` would ignore the ``min_count`` argument for aggregations on nullable boolean dtypes (:issue:`34051`)
- Fixed bug that `DataFrame(columns=.., dtype='string')` would fail (:issue:`27953`, :issue:`33623`)
+- Fixed bug in ``IntegerArray.astype`` to correctly copy the mask as well (:issue:`34931`).
Other
^^^^^
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index 7be7ef3637ee5..b5cb681812939 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -448,18 +448,22 @@ def astype(self, dtype, copy: bool = True) -> ArrayLike:
if incompatible type with an IntegerDtype, equivalent of same_kind
casting
"""
- from pandas.core.arrays.boolean import BooleanDtype
+ from pandas.core.arrays.masked import BaseMaskedDtype
from pandas.core.arrays.string_ import StringDtype
dtype = pandas_dtype(dtype)
- # if we are astyping to an existing IntegerDtype we can fastpath
- if isinstance(dtype, _IntegerDtype):
- result = self._data.astype(dtype.numpy_dtype, copy=False)
- return dtype.construct_array_type()(result, mask=self._mask, copy=False)
- elif isinstance(dtype, BooleanDtype):
- result = self._data.astype("bool", copy=False)
- return dtype.construct_array_type()(result, mask=self._mask, copy=False)
+ # if the dtype is exactly the same, we can fastpath
+ if self.dtype == dtype:
+ # return the same object for copy=False
+ return self.copy() if copy else self
+ # if we are astyping to another nullable masked dtype, we can fastpath
+ if isinstance(dtype, BaseMaskedDtype):
+ data = self._data.astype(dtype.numpy_dtype, copy=copy)
+ # mask is copied depending on whether the data was copied, and
+ # not directly depending on the `copy` keyword
+ mask = self._mask if data is self._data else self._mask.copy()
+ return dtype.construct_array_type()(data, mask, copy=False)
elif isinstance(dtype, StringDtype):
return dtype.construct_array_type()._from_sequence(self, copy=False)
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 28add129825d1..235840d6d201e 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -40,6 +40,17 @@ class BaseMaskedDtype(ExtensionDtype):
def numpy_dtype(self) -> np.dtype:
raise AbstractMethodError
+ @classmethod
+ def construct_array_type(cls) -> Type["BaseMaskedArray"]:
+ """
+ Return the array type associated with this dtype.
+
+ Returns
+ -------
+ type
+ """
+ raise NotImplementedError
+
class BaseMaskedArray(ExtensionArray, ExtensionOpsMixin):
"""
diff --git a/pandas/tests/arrays/integer/test_dtypes.py b/pandas/tests/arrays/integer/test_dtypes.py
index cafe9e47a18f4..67efa4cb2ce4a 100644
--- a/pandas/tests/arrays/integer/test_dtypes.py
+++ b/pandas/tests/arrays/integer/test_dtypes.py
@@ -144,6 +144,44 @@ def test_astype(all_data):
tm.assert_series_equal(result, expected)
+def test_astype_copy():
+ arr = pd.array([1, 2, 3, None], dtype="Int64")
+ orig = pd.array([1, 2, 3, None], dtype="Int64")
+
+ # copy=True -> ensure both data and mask are actual copies
+ result = arr.astype("Int64", copy=True)
+ assert result is not arr
+ assert not np.shares_memory(result._data, arr._data)
+ assert not np.shares_memory(result._mask, arr._mask)
+ result[0] = 10
+ tm.assert_extension_array_equal(arr, orig)
+ result[0] = pd.NA
+ tm.assert_extension_array_equal(arr, orig)
+
+ # copy=False
+ result = arr.astype("Int64", copy=False)
+ assert result is arr
+ assert np.shares_memory(result._data, arr._data)
+ assert np.shares_memory(result._mask, arr._mask)
+ result[0] = 10
+ assert arr[0] == 10
+ result[0] = pd.NA
+ assert arr[0] is pd.NA
+
+ # astype to different dtype -> always needs a copy -> even with copy=False
+ # we need to ensure that also the mask is actually copied
+ arr = pd.array([1, 2, 3, None], dtype="Int64")
+ orig = pd.array([1, 2, 3, None], dtype="Int64")
+
+ result = arr.astype("Int32", copy=False)
+ assert not np.shares_memory(result._data, arr._data)
+ assert not np.shares_memory(result._mask, arr._mask)
+ result[0] = 10
+ tm.assert_extension_array_equal(arr, orig)
+ result[0] = pd.NA
+ tm.assert_extension_array_equal(arr, orig)
+
+
def test_astype_to_larger_numpy():
a = pd.array([1, 2], dtype="Int32")
result = a.astype("int64")
| xref https://github.com/pandas-dev/pandas/pull/34307#discussion_r443370993
Right now, we were not copying the mask correctly with `copy=True`, and also not when the data was actually copied (then the mask also needs to be copied, even with `copy=False`) | https://api.github.com/repos/pandas-dev/pandas/pulls/34931 | 2020-06-22T10:11:47Z | 2020-07-10T12:14:11Z | 2020-07-10T12:14:11Z | 2020-07-11T17:12:43Z |
CLN: Update Cython data pointers for rolling apply | diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index 646444d10e416..ec4a412b5adc7 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -1377,17 +1377,11 @@ def roll_generic_fixed(object obj,
output[i] = NaN
# remaining full-length windows
- buf = <float64_t *>arr.data
- bufarr = np.empty(win, dtype=float)
- oldbuf = <float64_t *>bufarr.data
- for i in range((win - offset), (N - offset)):
- buf = buf + 1
- bufarr.data = <char *>buf
+ for j, i in enumerate(range((win - offset), (N - offset)), 1):
if counts[i] >= minp:
- output[i] = func(bufarr, *args, **kwargs)
+ output[i] = func(arr[j:j + win], *args, **kwargs)
else:
output[i] = NaN
- bufarr.data = <char *>oldbuf
# truncated windows at the end
for i in range(int_max(N - offset, 0), N):
| - [x] xref #34014
- [x] tests passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Performance looks pretty comparable between master and this PR
```
N = 10 ** 3
arr = 100 * np.random.random(N)
roll = pd.DataFrame(arr).rolling(10)
%timeit roll.apply(lambda x: np.sum(x) + 5, raw=True)
4.48 ms ± 14.8 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) <--PR
4.34 ms ± 29.6 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) <--master
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/34930 | 2020-06-22T07:35:46Z | 2020-06-23T16:31:30Z | 2020-06-23T16:31:30Z | 2020-06-23T16:37:22Z |
BUILD: make tests discoverable in .devcontainer.json | diff --git a/.devcontainer.json b/.devcontainer.json
index 315a1ff647012..8bea96aea29c1 100644
--- a/.devcontainer.json
+++ b/.devcontainer.json
@@ -17,7 +17,9 @@
"python.linting.pylintEnabled": false,
"python.linting.mypyEnabled": true,
"python.testing.pytestEnabled": true,
- "python.testing.cwd": "pandas/tests"
+ "python.testing.pytestArgs": [
+ "pandas"
+ ]
},
// Add the IDs of extensions you want installed when the container is created in the array below.
| I wasn't able to discover tests using the current setup, I had to use "python.testing.pytestArgs" instead of "python.testing.cwd" | https://api.github.com/repos/pandas-dev/pandas/pulls/34929 | 2020-06-22T06:47:33Z | 2020-06-23T17:33:09Z | 2020-06-23T17:33:09Z | 2020-06-23T17:50:12Z |
Fix DataFrame/Series stack/unstack docs | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index d12ebeafe8510..7b5938f44c523 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -6739,8 +6739,6 @@ def stack(self, level=-1, dropna=True):
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
- The new index levels are sorted.
-
Parameters
----------
level : int, str, list, default -1
@@ -6976,8 +6974,6 @@ def unstack(self, level=-1, fill_value=None):
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
- The level involved will automatically get sorted.
-
Parameters
----------
level : int, str, or list of these, default -1 (last level)
diff --git a/pandas/core/series.py b/pandas/core/series.py
index cab8dd133b579..2ec645983e84f 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -3823,8 +3823,6 @@ def unstack(self, level=-1, fill_value=None):
"""
Unstack, also known as pivot, Series with MultiIndex to produce DataFrame.
- The level involved will automatically get sorted.
-
Parameters
----------
level : int, str, or list of these, default last level
| - [x] closes #21675
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
As discussed in the issue, the sorted level is an implementation detail that does not always happen. Thus I think we should remove the misleading documentation. | https://api.github.com/repos/pandas-dev/pandas/pulls/34927 | 2020-06-22T01:57:55Z | 2020-06-23T20:13:52Z | 2020-06-23T20:13:52Z | 2020-06-24T15:00:52Z |
DOC: Fix language style | diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index c9ac275cc4ea7..5ffda03fad80f 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -16,7 +16,7 @@
from .pandas_vb_common import tm
-method_blacklist = {
+method_blocklist = {
"object": {
"median",
"prod",
@@ -403,7 +403,7 @@ class GroupByMethods:
]
def setup(self, dtype, method, application):
- if method in method_blacklist.get(dtype, {}):
+ if method in method_blocklist.get(dtype, {}):
raise NotImplementedError # skip benchmark
ngroups = 1000
size = ngroups * 2
diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index f7a513ca22d53..7b12de387d648 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -248,19 +248,19 @@ fi
### CODE ###
if [[ -z "$CHECK" || "$CHECK" == "code" ]]; then
- MSG='Check import. No warnings, and blacklist some optional dependencies' ; echo $MSG
+ MSG='Check import. No warnings, and blocklist some optional dependencies' ; echo $MSG
python -W error -c "
import sys
import pandas
-blacklist = {'bs4', 'gcsfs', 'html5lib', 'http', 'ipython', 'jinja2', 'hypothesis',
+blocklist = {'bs4', 'gcsfs', 'html5lib', 'http', 'ipython', 'jinja2', 'hypothesis',
'lxml', 'matplotlib', 'numexpr', 'openpyxl', 'py', 'pytest', 's3fs', 'scipy',
'tables', 'urllib.request', 'xlrd', 'xlsxwriter', 'xlwt'}
# GH#28227 for some of these check for top-level modules, while others are
# more specific (e.g. urllib.request)
import_mods = set(m.split('.')[0] for m in sys.modules) | set(sys.modules)
-mods = blacklist & import_mods
+mods = blocklist & import_mods
if mods:
sys.stderr.write('err: pandas should not import: {}\n'.format(', '.join(mods)))
sys.exit(len(mods))
diff --git a/doc/source/whatsnew/v0.14.1.rst b/doc/source/whatsnew/v0.14.1.rst
index 3dfc4272681df..5de193007474c 100644
--- a/doc/source/whatsnew/v0.14.1.rst
+++ b/doc/source/whatsnew/v0.14.1.rst
@@ -131,7 +131,7 @@ Enhancements
- Implemented ``sem`` (standard error of the mean) operation for ``Series``,
``DataFrame``, ``Panel``, and ``Groupby`` (:issue:`6897`)
-- Add ``nlargest`` and ``nsmallest`` to the ``Series`` ``groupby`` whitelist,
+- Add ``nlargest`` and ``nsmallest`` to the ``Series`` ``groupby`` allowlist,
which means you can now use these methods on a ``SeriesGroupBy`` object
(:issue:`7053`).
- All offsets ``apply``, ``rollforward`` and ``rollback`` can now handle ``np.datetime64``, previously results in ``ApplyTypeError`` (:issue:`7452`)
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py
index 08352d737dee0..e71b2f94c8014 100644
--- a/pandas/core/groupby/base.py
+++ b/pandas/core/groupby/base.py
@@ -1,6 +1,6 @@
"""
Provide basic components for groupby. These definitions
-hold the whitelist of methods that are exposed on the
+hold the allowlist of methods that are exposed on the
SeriesGroupBy and the DataFrameGroupBy objects.
"""
import collections
@@ -53,7 +53,7 @@ def _gotitem(self, key, ndim, subset=None):
# forwarding methods from NDFrames
plotting_methods = frozenset(["plot", "hist"])
-common_apply_whitelist = (
+common_apply_allowlist = (
frozenset(
[
"quantile",
@@ -72,9 +72,9 @@ def _gotitem(self, key, ndim, subset=None):
| plotting_methods
)
-series_apply_whitelist = (
+series_apply_allowlist = (
(
- common_apply_whitelist
+ common_apply_allowlist
| {
"nlargest",
"nsmallest",
@@ -84,13 +84,13 @@ def _gotitem(self, key, ndim, subset=None):
)
) | frozenset(["dtype", "unique"])
-dataframe_apply_whitelist = common_apply_whitelist | frozenset(["dtypes", "corrwith"])
+dataframe_apply_allowlist = common_apply_allowlist | frozenset(["dtypes", "corrwith"])
# cythonized transformations or canned "agg+broadcast", which do not
# require postprocessing of the result by transform.
cythonized_kernels = frozenset(["cumprod", "cumsum", "shift", "cummin", "cummax"])
-cython_cast_blacklist = frozenset(["rank", "count", "size", "idxmin", "idxmax"])
+cython_cast_blocklist = frozenset(["rank", "count", "size", "idxmin", "idxmax"])
# List of aggregation/reduction functions.
# These map each group to a single numeric value
@@ -186,4 +186,4 @@ def _gotitem(self, key, ndim, subset=None):
# Valid values of `name` for `groupby.transform(name)`
# NOTE: do NOT edit this directly. New additions should be inserted
# into the appropriate list above.
-transform_kernel_whitelist = reduction_kernels | transformation_kernels
+transform_kernel_allowlist = reduction_kernels | transformation_kernels
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index bc5cf595e49f9..dab8475d9580c 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -121,15 +121,15 @@ def prop(self):
return property(prop)
-def pin_whitelisted_properties(klass: Type[FrameOrSeries], whitelist: FrozenSet[str]):
+def pin_allowlisted_properties(klass: Type[FrameOrSeries], allowlist: FrozenSet[str]):
"""
- Create GroupBy member defs for DataFrame/Series names in a whitelist.
+ Create GroupBy member defs for DataFrame/Series names in a allowlist.
Parameters
----------
klass : DataFrame or Series class
class where members are defined.
- whitelist : frozenset[str]
+ allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
@@ -143,7 +143,7 @@ class decorator
"""
def pinner(cls):
- for name in whitelist:
+ for name in allowlist:
if hasattr(cls, name):
# don't override anything that was explicitly defined
# in the base class
@@ -157,9 +157,9 @@ def pinner(cls):
return pinner
-@pin_whitelisted_properties(Series, base.series_apply_whitelist)
+@pin_allowlisted_properties(Series, base.series_apply_allowlist)
class SeriesGroupBy(GroupBy[Series]):
- _apply_whitelist = base.series_apply_whitelist
+ _apply_allowlist = base.series_apply_allowlist
def _iterate_slices(self) -> Iterable[Series]:
yield self._selected_obj
@@ -473,7 +473,7 @@ def transform(self, func, *args, engine="cython", engine_kwargs=None, **kwargs):
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
- elif func not in base.transform_kernel_whitelist:
+ elif func not in base.transform_kernel_allowlist:
msg = f"'{func}' is not a valid function name for transform(name)"
raise ValueError(msg)
elif func in base.cythonized_kernels:
@@ -835,10 +835,10 @@ def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None):
return (filled / shifted) - 1
-@pin_whitelisted_properties(DataFrame, base.dataframe_apply_whitelist)
+@pin_allowlisted_properties(DataFrame, base.dataframe_apply_allowlist)
class DataFrameGroupBy(GroupBy[DataFrame]):
- _apply_whitelist = base.dataframe_apply_whitelist
+ _apply_allowlist = base.dataframe_apply_allowlist
_agg_examples_doc = dedent(
"""
@@ -1456,7 +1456,7 @@ def transform(self, func, *args, engine="cython", engine_kwargs=None, **kwargs):
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
- elif func not in base.transform_kernel_whitelist:
+ elif func not in base.transform_kernel_allowlist:
msg = f"'{func}' is not a valid function name for transform(name)"
raise ValueError(msg)
elif func in base.cythonized_kernels:
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 02f7f605a7605..d039b715b3c08 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -475,7 +475,7 @@ def _group_selection_context(groupby):
class _GroupBy(PandasObject, SelectionMixin, Generic[FrameOrSeries]):
_group_selection = None
- _apply_whitelist: FrozenSet[str] = frozenset()
+ _apply_allowlist: FrozenSet[str] = frozenset()
def __init__(
self,
@@ -689,7 +689,7 @@ def _set_result_index_ordered(self, result):
return result
def _dir_additions(self):
- return self.obj._dir_additions() | self._apply_whitelist
+ return self.obj._dir_additions() | self._apply_allowlist
def __getattr__(self, attr: str):
if attr in self._internal_names_set:
@@ -729,7 +729,7 @@ def pipe(self, func, *args, **kwargs):
plot = property(GroupByPlot)
def _make_wrapper(self, name):
- assert name in self._apply_whitelist
+ assert name in self._apply_allowlist
self._set_group_selection()
@@ -944,7 +944,7 @@ def _transform_should_cast(self, func_nm: str) -> bool:
"""
filled_series = self.grouper.size().fillna(0)
assert filled_series is not None
- return filled_series.gt(0).any() and func_nm not in base.cython_cast_blacklist
+ return filled_series.gt(0).any() and func_nm not in base.cython_cast_blocklist
def _cython_transform(self, how: str, numeric_only: bool = True, **kwargs):
output: Dict[base.OutputKey, np.ndarray] = {}
diff --git a/pandas/tests/groupby/test_whitelist.py b/pandas/tests/groupby/test_allowlist.py
similarity index 90%
rename from pandas/tests/groupby/test_whitelist.py
rename to pandas/tests/groupby/test_allowlist.py
index 9b595328d9230..0fd66cc047017 100644
--- a/pandas/tests/groupby/test_whitelist.py
+++ b/pandas/tests/groupby/test_allowlist.py
@@ -31,7 +31,7 @@
]
AGG_FUNCTIONS_WITH_SKIPNA = ["skew", "mad"]
-df_whitelist = [
+df_allowlist = [
"quantile",
"fillna",
"mad",
@@ -50,12 +50,12 @@
]
-@pytest.fixture(params=df_whitelist)
-def df_whitelist_fixture(request):
+@pytest.fixture(params=df_allowlist)
+def df_allowlist_fixture(request):
return request.param
-s_whitelist = [
+s_allowlist = [
"quantile",
"fillna",
"mad",
@@ -78,8 +78,8 @@ def df_whitelist_fixture(request):
]
-@pytest.fixture(params=s_whitelist)
-def s_whitelist_fixture(request):
+@pytest.fixture(params=s_allowlist)
+def s_allowlist_fixture(request):
return request.param
@@ -119,10 +119,10 @@ def df_letters():
return df
-@pytest.mark.parametrize("whitelist", [df_whitelist, s_whitelist])
-def test_groupby_whitelist(df_letters, whitelist):
+@pytest.mark.parametrize("allowlist", [df_allowlist, s_allowlist])
+def test_groupby_allowlist(df_letters, allowlist):
df = df_letters
- if whitelist == df_whitelist:
+ if allowlist == df_allowlist:
# dataframe
obj = df_letters
else:
@@ -130,11 +130,11 @@ def test_groupby_whitelist(df_letters, whitelist):
gb = obj.groupby(df.letters)
- assert set(whitelist) == set(gb._apply_whitelist)
+ assert set(allowlist) == set(gb._apply_allowlist)
-def check_whitelist(obj, df, m):
- # check the obj for a particular whitelist m
+def check_allowlist(obj, df, m):
+ # check the obj for a particular allowlist m
gb = obj.groupby(df.letters)
@@ -155,16 +155,16 @@ def check_whitelist(obj, df, m):
assert n.endswith(m)
-def test_groupby_series_whitelist(df_letters, s_whitelist_fixture):
- m = s_whitelist_fixture
+def test_groupby_series_allowlist(df_letters, s_allowlist_fixture):
+ m = s_allowlist_fixture
df = df_letters
- check_whitelist(df.letters, df, m)
+ check_allowlist(df.letters, df, m)
-def test_groupby_frame_whitelist(df_letters, df_whitelist_fixture):
- m = df_whitelist_fixture
+def test_groupby_frame_allowlist(df_letters, df_allowlist_fixture):
+ m = df_allowlist_fixture
df = df_letters
- check_whitelist(df, df, m)
+ check_allowlist(df, df, m)
@pytest.fixture
@@ -187,10 +187,10 @@ def raw_frame():
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("sort", [True, False])
-def test_regression_whitelist_methods(raw_frame, op, level, axis, skipna, sort):
+def test_regression_allowlist_methods(raw_frame, op, level, axis, skipna, sort):
# GH6944
# GH 17537
- # explicitly test the whitelist methods
+ # explicitly test the allowlist methods
if axis == 0:
frame = raw_frame
@@ -213,11 +213,11 @@ def test_regression_whitelist_methods(raw_frame, op, level, axis, skipna, sort):
tm.assert_frame_equal(result, expected)
-def test_groupby_blacklist(df_letters):
+def test_groupby_blocklist(df_letters):
df = df_letters
s = df_letters.floats
- blacklist = [
+ blocklist = [
"eval",
"query",
"abs",
@@ -234,9 +234,9 @@ def test_groupby_blacklist(df_letters):
]
to_methods = [method for method in dir(df) if method.startswith("to_")]
- blacklist.extend(to_methods)
+ blocklist.extend(to_methods)
- for bl in blacklist:
+ for bl in blocklist:
for obj in (df, s):
gb = obj.groupby(df.letters)
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py
index fd4ee2a81ebd8..cdaf27e214d80 100644
--- a/pandas/tests/groupby/transform/test_transform.py
+++ b/pandas/tests/groupby/transform/test_transform.py
@@ -728,7 +728,7 @@ def test_cython_transform_frame(op, args, targop):
# dict(by=['int','string'])]:
gb = df.groupby(**gb_target)
- # whitelisted methods set the selection before applying
+ # allowlisted methods set the selection before applying
# bit a of hack to make sure the cythonized shift
# is equivalent to pre 0.17.1 behavior
if op == "shift":
| - Ref: https://9to5google.com/2020/06/12/google-android-chrome-blacklist-blocklist-more-inclusive/
- Ref: https://android-review.googlesource.com/c/platform/frameworks/ml/+/970739
cc @pandas-dev/pandas-core | https://api.github.com/repos/pandas-dev/pandas/pulls/34924 | 2020-06-21T21:19:04Z | 2020-06-23T20:16:39Z | 2020-06-23T20:16:39Z | 2020-06-23T20:50:06Z |
TST : Added test for creating empty dataframe with column of type str… | diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 39cab3d5ec0b8..02a871666c78d 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -1643,6 +1643,12 @@ def test_constructor_empty_with_string_dtype(self):
df = DataFrame(index=[0, 1], columns=[0, 1], dtype="U5")
tm.assert_frame_equal(df, expected)
+ def test_constructor_empty_with_string_extension(self):
+ # GH 34915
+ expected = DataFrame(index=[], columns=["c1"], dtype="string")
+ df = DataFrame(columns=["c1"], dtype="string")
+ tm.assert_frame_equal(df, expected)
+
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0.0, index=[1, 2, 3], columns=["a", "b", "c"])
| - [x] closes #34915
- [x] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/34920 | 2020-06-21T11:12:13Z | 2020-06-23T17:45:33Z | 2020-06-23T17:45:33Z | 2020-06-23T17:45:33Z |
TST: Verify whether non writable numpy array is shiftable (21049) | diff --git a/pandas/tests/series/methods/test_shift.py b/pandas/tests/series/methods/test_shift.py
index 6257eecf4fc08..da6407c73104c 100644
--- a/pandas/tests/series/methods/test_shift.py
+++ b/pandas/tests/series/methods/test_shift.py
@@ -344,3 +344,16 @@ def test_shift_preserve_freqstr(self, periods):
index=pd.date_range("2016-1-1 02:00:00", periods=periods, freq="H"),
)
tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "input_data, output_data",
+ [(np.empty(shape=(0,)), []), (np.ones(shape=(2,)), [np.nan, 1.0])],
+ )
+ def test_shift_non_writable_array(self, input_data, output_data):
+ # GH21049 Verify whether non writable numpy array is shiftable
+ input_data.setflags(write=False)
+
+ result = pd.Series(input_data).shift(1)
+ expected = pd.Series(output_data, dtype="float64")
+
+ tm.assert_series_equal(result, expected)
| - [x ] closes #21049
- [x ] tests added / passed
- [x ] passes `black pandas`
- [x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/34919 | 2020-06-21T09:31:55Z | 2020-06-23T17:35:57Z | 2020-06-23T17:35:57Z | 2020-06-23T17:36:04Z |
BUG: indexing regression with datetime index | diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 513ca039366cb..350f86b4e9fd0 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -650,3 +650,13 @@ def test_loc_with_list_of_strings_representing_datetimes_not_matched_type(
s[labels]
with pytest.raises(KeyError, match=msg):
df.loc[labels]
+
+ def test_indexing_timeseries_regression(self):
+ # Issue 34860
+ arr = date_range("1/1/2008", "1/1/2009")
+ result = arr.to_series()["2008"]
+
+ rng = date_range(start="2008-01-01", end="2008-12-31")
+ expected = Series(rng, index=rng)
+
+ tm.assert_series_equal(result, expected)
| The bug is already fixed on master. I have just added a unit test.
- [x] closes #34860
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
| https://api.github.com/repos/pandas-dev/pandas/pulls/34917 | 2020-06-21T03:58:14Z | 2020-06-24T16:00:50Z | 2020-06-24T16:00:50Z | 2020-06-24T16:00:55Z |
REF: dont use compute_reduction | diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 0a274d8becd72..90cb0e2e1be4c 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -220,13 +220,7 @@ def apply_empty_result(self):
def apply_raw(self):
""" apply to the values as a numpy array """
- result, reduction_success = libreduction.compute_reduction(
- self.values, self.f, axis=self.axis
- )
-
- # We expect np.apply_along_axis to give a two-dimensional result, or raise.
- if not reduction_success:
- result = np.apply_along_axis(self.f, self.axis, self.values)
+ result = np.apply_along_axis(self.f, self.axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index 48a141a657cbb..8f0d3d9fbc734 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -745,9 +745,6 @@ def non_reducing_function(row):
df.apply(func, axis=1)
assert names == list(df.index)
- @pytest.mark.xfail(
- reason="The 'run once' enhancement for apply_raw not implemented yet."
- )
def test_apply_raw_function_runs_once(self):
# https://github.com/pandas-dev/pandas/issues/34506
| Full asv run shows no change.
This gets rid of one usage of libreduction.compute_reduction; #34909 gets rid of the other. | https://api.github.com/repos/pandas-dev/pandas/pulls/34913 | 2020-06-20T19:55:20Z | 2020-06-20T23:16:13Z | 2020-06-20T23:16:12Z | 2020-06-22T16:37:30Z |
ENH: specificy missing labels in loc calls GH34272 | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index f6ad3a800283d..9c78c8b2cbebc 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -13,6 +13,15 @@ including other versions of pandas.
Enhancements
~~~~~~~~~~~~
+.. _whatsnew_110.specify_missing_labels:
+
+KeyErrors raised by loc specify missing labels
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Previously, if labels were missing for a loc call, a KeyError was raised stating that this was no longer supported.
+
+Now the error message also includes a list of the missing labels (max 10 items, display width 80 characters). See :issue:`34272`.
+
+
.. _whatsnew_110.astype_string:
All dtypes can now be converted to ``StringDtype``
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 9c8b01003bece..3cf20b68c84f4 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -2,6 +2,8 @@
import numpy as np
+from pandas._config.config import option_context
+
from pandas._libs.indexing import _NDFrameIndexerBase
from pandas._libs.lib import item_from_zerodim
from pandas.errors import AbstractMethodError, InvalidIndexError
@@ -1283,7 +1285,8 @@ def _validate_read_indexer(
return
# Count missing values:
- missing = (indexer < 0).sum()
+ missing_mask = indexer < 0
+ missing = (missing_mask).sum()
if missing:
if missing == len(indexer):
@@ -1302,11 +1305,15 @@ def _validate_read_indexer(
# code, so we want to avoid warning & then
# just raising
if not ax.is_categorical():
- raise KeyError(
- "Passing list-likes to .loc or [] with any missing labels "
- "is no longer supported, see "
- "https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike" # noqa:E501
- )
+ not_found = key[missing_mask]
+
+ with option_context("display.max_seq_items", 10, "display.width", 80):
+ raise KeyError(
+ "Passing list-likes to .loc or [] with any missing labels "
+ "is no longer supported. "
+ f"The following labels were missing: {not_found}. "
+ "See https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike" # noqa:E501
+ )
@doc(IndexingMixin.iloc)
diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py
index 5c0230e75021c..b77c47f927517 100644
--- a/pandas/tests/indexing/test_indexing.py
+++ b/pandas/tests/indexing/test_indexing.py
@@ -1075,3 +1075,32 @@ def test_setitem_with_bool_mask_and_values_matching_n_trues_in_length():
result = ser
expected = pd.Series([None] * 3 + list(range(5)) + [None] * 2).astype("object")
tm.assert_series_equal(result, expected)
+
+
+def test_missing_labels_inside_loc_matched_in_error_message():
+ # GH34272
+ s = pd.Series({"a": 1, "b": 2, "c": 3})
+ error_message_regex = "missing_0.*missing_1.*missing_2"
+ with pytest.raises(KeyError, match=error_message_regex):
+ s.loc[["a", "b", "missing_0", "c", "missing_1", "missing_2"]]
+
+
+def test_many_missing_labels_inside_loc_error_message_limited():
+ # GH34272
+ n = 10000
+ missing_labels = [f"missing_{label}" for label in range(n)]
+ s = pd.Series({"a": 1, "b": 2, "c": 3})
+ # regex checks labels between 4 and 9995 are replaced with ellipses
+ error_message_regex = "missing_4.*\\.\\.\\..*missing_9995"
+ with pytest.raises(KeyError, match=error_message_regex):
+ s.loc[["a", "c"] + missing_labels]
+
+
+def test_long_text_missing_labels_inside_loc_error_message_limited():
+ # GH34272
+ s = pd.Series({"a": 1, "b": 2, "c": 3})
+ missing_labels = [f"long_missing_label_text_{i}" * 5 for i in range(3)]
+ # regex checks for very long labels there are new lines between each
+ error_message_regex = "long_missing_label_text_0.*\\\\n.*long_missing_label_text_1"
+ with pytest.raises(KeyError, match=error_message_regex):
+ s.loc[["a", "c"] + missing_labels]
| - [ ] closes #34272
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/34912 | 2020-06-20T18:48:43Z | 2020-06-26T20:21:17Z | 2020-06-26T20:21:17Z | 2020-06-26T20:23:40Z |
TST: Verify whether Datetime subclasses are also of dtype datetime | diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index baac87755c6d2..756f3fec82b84 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2539,6 +2539,14 @@ def test_from_M8_structured(self):
assert isinstance(s[0], Timestamp)
assert s[0] == dates[0][0]
+ def test_from_datetime_subclass(self):
+ # GH21142 Verify whether Datetime subclasses are also of dtype datetime
+ class DatetimeSubclass(datetime):
+ pass
+
+ data = pd.DataFrame({"datetime": [DatetimeSubclass(2020, 1, 1, 1, 1)]})
+ assert data.datetime.dtype == "datetime64[ns]"
+
class TestDataFrameConstructorWithDatetimeTZ:
def test_from_dict(self):
| - [x] closes #21142
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/34911 | 2020-06-20T17:23:13Z | 2020-06-20T22:17:52Z | 2020-06-20T22:17:52Z | 2020-06-20T22:17:52Z |
DOC: explain EWM | diff --git a/doc/redirects.csv b/doc/redirects.csv
index b59ccf649ee21..bceb4b5961324 100644
--- a/doc/redirects.csv
+++ b/doc/redirects.csv
@@ -269,11 +269,11 @@ generated/pandas.core.resample.Resampler.std,../reference/api/pandas.core.resamp
generated/pandas.core.resample.Resampler.sum,../reference/api/pandas.core.resample.Resampler.sum
generated/pandas.core.resample.Resampler.transform,../reference/api/pandas.core.resample.Resampler.transform
generated/pandas.core.resample.Resampler.var,../reference/api/pandas.core.resample.Resampler.var
-generated/pandas.core.window.EWM.corr,../reference/api/pandas.core.window.EWM.corr
-generated/pandas.core.window.EWM.cov,../reference/api/pandas.core.window.EWM.cov
-generated/pandas.core.window.EWM.mean,../reference/api/pandas.core.window.EWM.mean
-generated/pandas.core.window.EWM.std,../reference/api/pandas.core.window.EWM.std
-generated/pandas.core.window.EWM.var,../reference/api/pandas.core.window.EWM.var
+generated/pandas.core.window.ExponentialMovingWindow.corr,../reference/api/pandas.core.window.ExponentialMovingWindow.corr
+generated/pandas.core.window.ExponentialMovingWindow.cov,../reference/api/pandas.core.window.ExponentialMovingWindow.cov
+generated/pandas.core.window.ExponentialMovingWindow.mean,../reference/api/pandas.core.window.ExponentialMovingWindow.mean
+generated/pandas.core.window.ExponentialMovingWindow.std,../reference/api/pandas.core.window.ExponentialMovingWindow.std
+generated/pandas.core.window.ExponentialMovingWindow.var,../reference/api/pandas.core.window.ExponentialMovingWindow.var
generated/pandas.core.window.Expanding.aggregate,../reference/api/pandas.core.window.Expanding.aggregate
generated/pandas.core.window.Expanding.apply,../reference/api/pandas.core.window.Expanding.apply
generated/pandas.core.window.Expanding.corr,../reference/api/pandas.core.window.Expanding.corr
diff --git a/doc/source/reference/window.rst b/doc/source/reference/window.rst
index fb60a0d387ca2..d7e6405a3732b 100644
--- a/doc/source/reference/window.rst
+++ b/doc/source/reference/window.rst
@@ -8,7 +8,7 @@ Window
Rolling objects are returned by ``.rolling`` calls: :func:`pandas.DataFrame.rolling`, :func:`pandas.Series.rolling`, etc.
Expanding objects are returned by ``.expanding`` calls: :func:`pandas.DataFrame.expanding`, :func:`pandas.Series.expanding`, etc.
-EWM objects are returned by ``.ewm`` calls: :func:`pandas.DataFrame.ewm`, :func:`pandas.Series.ewm`, etc.
+ExponentialMovingWindow objects are returned by ``.ewm`` calls: :func:`pandas.DataFrame.ewm`, :func:`pandas.Series.ewm`, etc.
Standard moving window functions
--------------------------------
@@ -69,11 +69,11 @@ Exponentially-weighted moving window functions
.. autosummary::
:toctree: api/
- EWM.mean
- EWM.std
- EWM.var
- EWM.corr
- EWM.cov
+ ExponentialMovingWindow.mean
+ ExponentialMovingWindow.std
+ ExponentialMovingWindow.var
+ ExponentialMovingWindow.corr
+ ExponentialMovingWindow.cov
Window indexer
--------------
diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index cf630a9671013..19fdb541a6a45 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -230,7 +230,7 @@ see the :ref:`groupby docs <groupby.transform.window_resample>`.
The API for window statistics is quite similar to the way one works with ``GroupBy`` objects, see the documentation :ref:`here <groupby>`.
We work with ``rolling``, ``expanding`` and ``exponentially weighted`` data through the corresponding
-objects, :class:`~pandas.core.window.Rolling`, :class:`~pandas.core.window.Expanding` and :class:`~pandas.core.window.EWM`.
+objects, :class:`~pandas.core.window.Rolling`, :class:`~pandas.core.window.Expanding` and :class:`~pandas.core.window.ExponentialMovingWindow`.
.. ipython:: python
@@ -777,7 +777,7 @@ columns by reshaping and indexing:
Aggregation
-----------
-Once the ``Rolling``, ``Expanding`` or ``EWM`` objects have been created, several methods are available to
+Once the ``Rolling``, ``Expanding`` or ``ExponentialMovingWindow`` objects have been created, several methods are available to
perform multiple computations on the data. These operations are similar to the :ref:`aggregating API <basics.aggregate>`,
:ref:`groupby API <groupby.aggregate>`, and :ref:`resample API <timeseries.aggregate>`.
@@ -971,7 +971,7 @@ Exponentially weighted windows
A related set of functions are exponentially weighted versions of several of
the above statistics. A similar interface to ``.rolling`` and ``.expanding`` is accessed
-through the ``.ewm`` method to receive an :class:`~EWM` object.
+through the ``.ewm`` method to receive an :class:`~ExponentialMovingWindow` object.
A number of expanding EW (exponentially weighted)
methods are provided:
@@ -980,11 +980,11 @@ methods are provided:
:header: "Function", "Description"
:widths: 20, 80
- :meth:`~EWM.mean`, EW moving average
- :meth:`~EWM.var`, EW moving variance
- :meth:`~EWM.std`, EW moving standard deviation
- :meth:`~EWM.corr`, EW moving correlation
- :meth:`~EWM.cov`, EW moving covariance
+ :meth:`~ExponentialMovingWindow.mean`, EW moving average
+ :meth:`~ExponentialMovingWindow.var`, EW moving variance
+ :meth:`~ExponentialMovingWindow.std`, EW moving standard deviation
+ :meth:`~ExponentialMovingWindow.corr`, EW moving correlation
+ :meth:`~ExponentialMovingWindow.cov`, EW moving covariance
In general, a weighted moving average is calculated as
@@ -1090,12 +1090,12 @@ Here is an example for a univariate time series:
@savefig ewma_ex.png
s.ewm(span=20).mean().plot(style='k')
-EWM has a ``min_periods`` argument, which has the same
+ExponentialMovingWindow has a ``min_periods`` argument, which has the same
meaning it does for all the ``.expanding`` and ``.rolling`` methods:
no output values will be set until at least ``min_periods`` non-null values
are encountered in the (expanding) window.
-EWM also has an ``ignore_na`` argument, which determines how
+ExponentialMovingWindow also has an ``ignore_na`` argument, which determines how
intermediate null values affect the calculation of the weights.
When ``ignore_na=False`` (the default), weights are calculated based on absolute
positions, so that intermediate null values affect the result.
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 44558fd63ba15..3cd920158f774 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -1206,7 +1206,7 @@ Groupby/resample/rolling
- Bug in :meth:`pandas.core.groupby.GroupBy.agg` where incorrect results are returned for uint64 columns. (:issue:`26310`)
- Bug in :meth:`pandas.core.window.Rolling.median` and :meth:`pandas.core.window.Rolling.quantile` where MemoryError is raised with empty window (:issue:`26005`)
- Bug in :meth:`pandas.core.window.Rolling.median` and :meth:`pandas.core.window.Rolling.quantile` where incorrect results are returned with ``closed='left'`` and ``closed='neither'`` (:issue:`26005`)
-- Improved :class:`pandas.core.window.Rolling`, :class:`pandas.core.window.Window` and :class:`pandas.core.window.EWM` functions to exclude nuisance columns from results instead of raising errors and raise a ``DataError`` only if all columns are nuisance (:issue:`12537`)
+- Improved :class:`pandas.core.window.Rolling`, :class:`pandas.core.window.Window` and :class:`pandas.core.window.ExponentialMovingWindow` functions to exclude nuisance columns from results instead of raising errors and raise a ``DataError`` only if all columns are nuisance (:issue:`12537`)
- Bug in :meth:`pandas.core.window.Rolling.max` and :meth:`pandas.core.window.Rolling.min` where incorrect results are returned with an empty variable window (:issue:`26005`)
- Raise a helpful exception when an unsupported weighted window function is used as an argument of :meth:`pandas.core.window.Window.aggregate` (:issue:`26597`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 39ca7ed47f7fa..d12ebeafe8510 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7288,7 +7288,7 @@ def _gotitem(
core.resample.Resampler : Perform operations over resampled bins.
core.window.Rolling : Perform operations over rolling window.
core.window.Expanding : Perform operations over expanding window.
- core.window.EWM : Perform operation over exponential weighted
+ core.window.ExponentialMovingWindow : Perform operation over exponential weighted
window.
"""
)
@@ -8171,7 +8171,7 @@ def cov(self, min_periods=None) -> "DataFrame":
See Also
--------
Series.cov : Compute covariance with another Series.
- core.window.EWM.cov: Exponential weighted sample covariance.
+ core.window.ExponentialMovingWindow.cov: Exponential weighted sample covariance.
core.window.Expanding.cov : Expanding sample covariance.
core.window.Rolling.cov : Rolling sample covariance.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 701909c9df857..1404d225eea97 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -10460,7 +10460,12 @@ def _add_series_or_dataframe_operations(cls):
Add the series or dataframe only operations to the cls; evaluate
the doc strings again.
"""
- from pandas.core.window import EWM, Expanding, Rolling, Window
+ from pandas.core.window import (
+ Expanding,
+ ExponentialMovingWindow,
+ Rolling,
+ Window,
+ )
@doc(Rolling)
def rolling(
@@ -10507,7 +10512,7 @@ def expanding(self, min_periods=1, center=False, axis=0):
cls.expanding = expanding
- @doc(EWM)
+ @doc(ExponentialMovingWindow)
def ewm(
self,
com=None,
@@ -10520,7 +10525,7 @@ def ewm(
axis=0,
):
axis = self._get_axis_number(axis)
- return EWM(
+ return ExponentialMovingWindow(
self,
com=com,
span=span,
diff --git a/pandas/core/window/__init__.py b/pandas/core/window/__init__.py
index dcf58a4c0dd5b..304c61ac0e489 100644
--- a/pandas/core/window/__init__.py
+++ b/pandas/core/window/__init__.py
@@ -1,3 +1,3 @@
-from pandas.core.window.ewm import EWM # noqa:F401
+from pandas.core.window.ewm import ExponentialMovingWindow # noqa:F401
from pandas.core.window.expanding import Expanding, ExpandingGroupby # noqa:F401
from pandas.core.window.rolling import Rolling, RollingGroupby, Window # noqa:F401
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index b708020be90d2..ee80f80b320e4 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -59,7 +59,7 @@ def get_center_of_mass(
return float(comass)
-class EWM(_Rolling):
+class ExponentialMovingWindow(_Rolling):
r"""
Provide exponential weighted (EW) functions.
@@ -185,7 +185,7 @@ def __init__(
@property
def _constructor(self):
- return EWM
+ return ExponentialMovingWindow
_agg_see_also_doc = dedent(
"""
diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py
index 9ba194dcf0959..0957cac7aff95 100644
--- a/pandas/tests/window/test_ewm.py
+++ b/pandas/tests/window/test_ewm.py
@@ -4,7 +4,7 @@
from pandas.errors import UnsupportedFunctionCall
from pandas import DataFrame, Series
-from pandas.core.window import EWM
+from pandas.core.window import ExponentialMovingWindow
def test_doc_string():
@@ -56,7 +56,7 @@ def test_constructor(which):
@pytest.mark.parametrize("method", ["std", "mean", "var"])
def test_numpy_compat(method):
# see gh-12811
- e = EWM(Series([2, 4, 6]), alpha=0.5)
+ e = ExponentialMovingWindow(Series([2, 4, 6]), alpha=0.5)
msg = "numpy operations are not valid with window objects"
| - [ ] closes #34867
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/34910 | 2020-06-20T17:20:06Z | 2020-06-20T22:20:59Z | 2020-06-20T22:20:59Z | 2020-06-21T08:14:13Z |
PERF: avoid creating many Series in apply_standard | diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 0a274d8becd72..e39915cdce2e7 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -4,16 +4,13 @@
import numpy as np
+from pandas._config import option_context
+
from pandas._libs import reduction as libreduction
from pandas._typing import Axis
from pandas.util._decorators import cache_readonly
-from pandas.core.dtypes.common import (
- is_dict_like,
- is_extension_array_dtype,
- is_list_like,
- is_sequence,
-)
+from pandas.core.dtypes.common import is_dict_like, is_list_like, is_sequence
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.construction import create_series_with_explicit_dtype
@@ -266,53 +263,6 @@ def apply_standard(self):
# partial result that may be returned from reduction
partial_result = None
- # try to reduce first (by default)
- # this only matters if the reduction in values is of different dtype
- # e.g. if we want to apply to a SparseFrame, then can't directly reduce
-
- # we cannot reduce using non-numpy dtypes,
- # as demonstrated in gh-12244
- if (
- self.result_type in ["reduce", None]
- and not self.dtypes.apply(is_extension_array_dtype).any()
- # Disallow dtypes where setting _index_data will break
- # ExtensionArray values, see GH#31182
- and not self.dtypes.apply(lambda x: x.kind in ["m", "M"]).any()
- # Disallow complex_internals since libreduction shortcut raises a TypeError
- and not self.agg_axis._has_complex_internals
- ):
-
- values = self.values
- index = self.obj._get_axis(self.axis)
- labels = self.agg_axis
- empty_arr = np.empty(len(index), dtype=values.dtype)
-
- # Preserve subclass for e.g. test_subclassed_apply
- dummy = self.obj._constructor_sliced(
- empty_arr, index=index, dtype=values.dtype
- )
-
- try:
- result, reduction_success = libreduction.compute_reduction(
- values, self.f, axis=self.axis, dummy=dummy, labels=labels
- )
- except TypeError:
- # e.g. test_apply_ignore_failures we just ignore
- if not self.ignore_failures:
- raise
- except ZeroDivisionError:
- # reached via numexpr; fall back to python implementation
- pass
- else:
- if reduction_success:
- return self.obj._constructor_sliced(result, index=labels)
-
- # no exceptions - however reduction was unsuccessful,
- # use the computed function result for first element
- partial_result = result[0]
- if isinstance(partial_result, ABCSeries):
- partial_result = partial_result.infer_objects()
-
# compute the result using the series generator,
# use the result computed while trying to reduce if available.
results, res_index = self.apply_series_generator(partial_result)
@@ -350,7 +300,14 @@ def apply_series_generator(self, partial_result=None) -> Tuple[ResType, "Index"]
else:
for i, v in series_gen_enumeration:
- results[i] = self.f(v)
+ with option_context("mode.chained_assignment", None):
+ # ignore SettingWithCopy here in case the user mutates
+ results[i] = self.f(v)
+
+ if isinstance(results[i], ABCSeries):
+ # If we have a view on v, we need to make a copy because
+ # series_generator will swap out the underlying data
+ results[i] = results[i].copy(deep=False)
return results, res_index
@@ -361,7 +318,6 @@ def wrap_results(
# see if we can infer the results
if len(results) > 0 and 0 in results and is_sequence(results[0]):
-
return self.wrap_results_for_axis(results, res_index)
# dict of scalars
@@ -401,9 +357,30 @@ def result_columns(self) -> "Index":
def wrap_results_for_axis(
self, results: ResType, res_index: "Index"
- ) -> "DataFrame":
+ ) -> Union["Series", "DataFrame"]:
""" return the results for the rows """
- result = self.obj._constructor(data=results)
+
+ if self.result_type == "reduce":
+ # e.g. test_apply_dict GH#8735
+ return self.obj._constructor_sliced(results)
+ elif self.result_type is None and all(
+ isinstance(x, dict) for x in results.values()
+ ):
+ # Our operation was a to_dict op e.g.
+ # test_apply_dict GH#8735, test_apply_reduce_rows_to_dict GH#25196
+ return self.obj._constructor_sliced(results)
+
+ try:
+ result = self.obj._constructor(data=results)
+ except ValueError as err:
+ if "arrays must all be same length" in str(err):
+ # e.g. result = [[2, 3], [1.5], ['foo', 'bar']]
+ # see test_agg_listlike_result GH#29587
+ res = self.obj._constructor_sliced(results)
+ res.index = res_index
+ return res
+ else:
+ raise
if not isinstance(results[0], ABCSeries):
if len(result.index) == len(self.res_columns):
@@ -424,11 +401,19 @@ def apply_broadcast(self, target: "DataFrame") -> "DataFrame":
@property
def series_generator(self):
- constructor = self.obj._constructor_sliced
- return (
- constructor(arr, index=self.columns, name=name)
- for i, (arr, name) in enumerate(zip(self.values, self.index))
- )
+ values = self.values
+ assert len(values) > 0
+
+ # We create one Series object, and will swap out the data inside
+ # of it. Kids: don't do this at home.
+ ser = self.obj._ixs(0, axis=0)
+ mgr = ser._mgr
+ blk = mgr.blocks[0]
+
+ for (arr, name) in zip(values, self.index):
+ blk.values = arr
+ ser.name = name
+ yield ser
@property
def result_index(self) -> "Index":
@@ -450,9 +435,7 @@ def wrap_results_for_axis(
# we have a non-series and don't want inference
elif not isinstance(results[0], ABCSeries):
- from pandas import Series
-
- result = Series(results)
+ result = self.obj._constructor_sliced(results)
result.index = res_index
# we may want to infer results
| - [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
This avoids going through perennial-problem-causing libreduction code (xref #34014, #34080) and instead does the same trick in python-space by re-assigning block.values instead of creating new Series objects.
If we avoid libreduction but _dont_ do this optimization, the most-affected asv is `time_apply_ref_by_name` that clocks in at 6.92x slower. This achieves parity on that asv.
<s>ATM I'm still getting 4 test failures locally, need to troubleshoot.</s> Update: passing
| https://api.github.com/repos/pandas-dev/pandas/pulls/34909 | 2020-06-20T16:25:52Z | 2020-06-25T23:06:11Z | 2020-06-25T23:06:11Z | 2020-07-08T15:42:54Z |
BUG: incorrect type when indexing sparse dataframe with iterable | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index cee41f248fc60..386fe3ce2160f 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -1124,6 +1124,7 @@ Sparse
- Bug where :class:`DataFrame` containing :class:`SparseArray` filled with ``NaN`` when indexed by a list-like (:issue:`27781`, :issue:`29563`)
- The repr of :class:`SparseDtype` now includes the repr of its ``fill_value`` attribute. Previously it used ``fill_value``'s string representation (:issue:`34352`)
- Bug where empty :class:`DataFrame` could not be cast to :class:`SparseDtype` (:issue:`33113`)
+- Bug in :meth:`arrays.SparseArray` was returning the incorrect type when indexing a sparse dataframe with an iterable (:issue:`34526`, :issue:`34540`)
ExtensionArray
^^^^^^^^^^^^^^
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py
index 4996a10002c63..b18a58da3950f 100644
--- a/pandas/core/arrays/sparse/array.py
+++ b/pandas/core/arrays/sparse/array.py
@@ -866,11 +866,8 @@ def _take_with_fill(self, indices, fill_value=None) -> np.ndarray:
if self.sp_index.npoints == 0:
# Avoid taking from the empty self.sp_values
- taken = np.full(
- sp_indexer.shape,
- fill_value=fill_value,
- dtype=np.result_type(type(fill_value)),
- )
+ _dtype = np.result_type(self.dtype.subtype, type(fill_value))
+ taken = np.full(sp_indexer.shape, fill_value=fill_value, dtype=_dtype)
else:
taken = self.sp_values.take(sp_indexer)
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index 3865ea64ee479..3fa3c9303806f 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -21,7 +21,6 @@
notna,
)
import pandas._testing as tm
-from pandas.arrays import SparseArray
import pandas.core.common as com
from pandas.core.indexing import IndexingError
@@ -1907,20 +1906,6 @@ def test_getitem_ix_float_duplicates(self):
expect = df.iloc[[1, -1], 0]
tm.assert_series_equal(df.loc[0.2, "a"], expect)
- def test_getitem_sparse_column(self):
- # https://github.com/pandas-dev/pandas/issues/23559
- data = SparseArray([0, 1])
- df = pd.DataFrame({"A": data})
- expected = pd.Series(data, name="A")
- result = df["A"]
- tm.assert_series_equal(result, expected)
-
- result = df.iloc[:, 0]
- tm.assert_series_equal(result, expected)
-
- result = df.loc[:, "A"]
- tm.assert_series_equal(result, expected)
-
def test_setitem_with_unaligned_tz_aware_datetime_column(self):
# GH 12981
# Assignment of unaligned offset-aware datetime series.
diff --git a/pandas/tests/frame/indexing/test_sparse.py b/pandas/tests/frame/indexing/test_sparse.py
new file mode 100644
index 0000000000000..876fbe212c466
--- /dev/null
+++ b/pandas/tests/frame/indexing/test_sparse.py
@@ -0,0 +1,51 @@
+import numpy as np
+import pytest
+
+import pandas.util._test_decorators as td
+
+import pandas as pd
+import pandas._testing as tm
+from pandas.arrays import SparseArray
+from pandas.core.arrays.sparse import SparseDtype
+
+
+class TestSparseDataFrameIndexing:
+ def test_getitem_sparse_column(self):
+ # https://github.com/pandas-dev/pandas/issues/23559
+ data = SparseArray([0, 1])
+ df = pd.DataFrame({"A": data})
+ expected = pd.Series(data, name="A")
+ result = df["A"]
+ tm.assert_series_equal(result, expected)
+
+ result = df.iloc[:, 0]
+ tm.assert_series_equal(result, expected)
+
+ result = df.loc[:, "A"]
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("spmatrix_t", ["coo_matrix", "csc_matrix", "csr_matrix"])
+ @pytest.mark.parametrize("dtype", [np.int64, np.float64, complex])
+ @td.skip_if_no_scipy
+ def test_locindexer_from_spmatrix(self, spmatrix_t, dtype):
+ import scipy.sparse
+
+ spmatrix_t = getattr(scipy.sparse, spmatrix_t)
+
+ # The bug is triggered by a sparse matrix with purely sparse columns. So the
+ # recipe below generates a rectangular matrix of dimension (5, 7) where all the
+ # diagonal cells are ones, meaning the last two columns are purely sparse.
+ rows, cols = 5, 7
+ spmatrix = spmatrix_t(np.eye(rows, cols, dtype=dtype), dtype=dtype)
+ df = pd.DataFrame.sparse.from_spmatrix(spmatrix)
+
+ # regression test for #34526
+ itr_idx = range(2, rows)
+ result = df.loc[itr_idx].values
+ expected = spmatrix.toarray()[itr_idx]
+ tm.assert_numpy_array_equal(result, expected)
+
+ # regression test for #34540
+ result = df.loc[itr_idx].dtypes.values
+ expected = np.full(cols, SparseDtype(dtype, fill_value=0))
+ tm.assert_numpy_array_equal(result, expected)
| closes #34526
closes #34540
- [x] 2 tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
The problem arose when indexing with an iterable. If the result consisted of columns which originally only had sparse values, the `dtype` was solely inferred from the `fill_value`, which defaults to `0`. Hence, the resulting columns have the dtype ` Sparse[int64, 0]`. This commit changes the type inference logic to use the numpy type promotion rules between the underlying subtype of the `SparseArray.dtype` and the type of the fill value. | https://api.github.com/repos/pandas-dev/pandas/pulls/34908 | 2020-06-20T16:04:08Z | 2020-07-08T11:47:10Z | 2020-07-08T11:47:08Z | 2020-07-08T11:47:20Z |
TST: pandas/test/window/ changes for #30999 | diff --git a/pandas/tests/window/moments/test_moments_ewm.py b/pandas/tests/window/moments/test_moments_ewm.py
index c6a92c0ad47b6..89d46a8bb6cb5 100644
--- a/pandas/tests/window/moments/test_moments_ewm.py
+++ b/pandas/tests/window/moments/test_moments_ewm.py
@@ -116,10 +116,12 @@ def test_ewma_span_com_args(series):
A = series.ewm(com=9.5).mean()
B = series.ewm(span=20).mean()
tm.assert_almost_equal(A, B)
-
- with pytest.raises(ValueError):
+ msg = "comass, span, halflife, and alpha are mutually exclusive"
+ with pytest.raises(ValueError, match=msg):
series.ewm(com=9.5, span=20)
- with pytest.raises(ValueError):
+
+ msg = "Must pass one of comass, span, halflife, or alpha"
+ with pytest.raises(ValueError, match=msg):
series.ewm().mean()
@@ -127,8 +129,8 @@ def test_ewma_halflife_arg(series):
A = series.ewm(com=13.932726172912965).mean()
B = series.ewm(halflife=10.0).mean()
tm.assert_almost_equal(A, B)
-
- with pytest.raises(ValueError):
+ msg = "comass, span, halflife, and alpha are mutually exclusive"
+ with pytest.raises(ValueError, match=msg):
series.ewm(span=20, halflife=50)
with pytest.raises(ValueError):
series.ewm(com=9.5, halflife=50)
@@ -153,13 +155,16 @@ def test_ewm_alpha(arr):
def test_ewm_alpha_arg(series):
# GH 10789
s = series
- with pytest.raises(ValueError):
+ msg = "Must pass one of comass, span, halflife, or alpha"
+ with pytest.raises(ValueError, match=msg):
s.ewm()
- with pytest.raises(ValueError):
+
+ msg = "comass, span, halflife, and alpha are mutually exclusive"
+ with pytest.raises(ValueError, match=msg):
s.ewm(com=10.0, alpha=0.5)
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
s.ewm(span=10.0, alpha=0.5)
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
s.ewm(halflife=10.0, alpha=0.5)
diff --git a/pandas/tests/window/moments/test_moments_rolling.py b/pandas/tests/window/moments/test_moments_rolling.py
index f6e2834965da3..81f020fe7de23 100644
--- a/pandas/tests/window/moments/test_moments_rolling.py
+++ b/pandas/tests/window/moments/test_moments_rolling.py
@@ -198,7 +198,8 @@ def test_centered_axis_validation():
Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean()
# bad axis
- with pytest.raises(ValueError):
+ msg = "No axis named 1 for object type Series"
+ with pytest.raises(ValueError, match=msg):
Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean()
# ok ok
@@ -206,7 +207,8 @@ def test_centered_axis_validation():
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=1).mean()
# bad axis
- with pytest.raises(ValueError):
+ msg = "No axis named 2 for object type DataFrame"
+ with pytest.raises(ValueError, match=msg):
(DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=2).mean())
@@ -743,8 +745,8 @@ def test_rolling_min(raw, series, frame):
result = a.rolling(window=100, min_periods=1).min()
expected = pd.Series(np.ones(len(a)))
tm.assert_series_equal(result, expected)
-
- with pytest.raises(ValueError):
+ msg = "min_periods 5 must be <= window 3"
+ with pytest.raises(ValueError, match=msg):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).min()
@@ -754,8 +756,8 @@ def test_rolling_max(raw, series, frame):
a = pd.Series([1, 2, 3, 4, 5], dtype=np.float64)
b = a.rolling(window=100, min_periods=1).max()
tm.assert_almost_equal(a, b)
-
- with pytest.raises(ValueError):
+ msg = "min_periods 5 must be <= window 3"
+ with pytest.raises(ValueError, match=msg):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).max()
@@ -841,14 +843,16 @@ def test_invalid_quantile_value():
def test_rolling_quantile_param():
ser = Series([0.0, 0.1, 0.5, 0.9, 1.0])
-
- with pytest.raises(ValueError):
+ msg = "quantile value -0.1 not in \\[0, 1\\]"
+ with pytest.raises(ValueError, match=msg):
ser.rolling(3).quantile(-0.1)
- with pytest.raises(ValueError):
+ msg = "quantile value 10.0 not in \\[0, 1\\]"
+ with pytest.raises(ValueError, match=msg):
ser.rolling(3).quantile(10.0)
- with pytest.raises(TypeError):
+ msg = "must be real number, not str"
+ with pytest.raises(TypeError, match=msg):
ser.rolling(3).quantile("foo")
diff --git a/pandas/tests/window/test_dtypes.py b/pandas/tests/window/test_dtypes.py
index b1c9b66ab09d3..0aa5bf019ff5e 100644
--- a/pandas/tests/window/test_dtypes.py
+++ b/pandas/tests/window/test_dtypes.py
@@ -220,7 +220,8 @@ def check_dtypes(self, f, f_name, d, d_name, exp):
tm.assert_almost_equal(result, exp)
else:
- with pytest.raises(DataError):
+ msg = "No numeric types to aggregate"
+ with pytest.raises(DataError, match=msg):
f(roll)
diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py
index 0957cac7aff95..44015597ddb19 100644
--- a/pandas/tests/window/test_ewm.py
+++ b/pandas/tests/window/test_ewm.py
@@ -28,28 +28,33 @@ def test_constructor(which):
c(halflife=0.75, alpha=None)
# not valid: mutually exclusive
- with pytest.raises(ValueError):
+ msg = "comass, span, halflife, and alpha are mutually exclusive"
+ with pytest.raises(ValueError, match=msg):
c(com=0.5, alpha=0.5)
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
c(span=1.5, halflife=0.75)
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
c(alpha=0.5, span=1.5)
# not valid: com < 0
- with pytest.raises(ValueError):
+ msg = "comass must satisfy: comass >= 0"
+ with pytest.raises(ValueError, match=msg):
c(com=-0.5)
# not valid: span < 1
- with pytest.raises(ValueError):
+ msg = "span must satisfy: span >= 1"
+ with pytest.raises(ValueError, match=msg):
c(span=0.5)
# not valid: halflife <= 0
- with pytest.raises(ValueError):
+ msg = "halflife must satisfy: halflife > 0"
+ with pytest.raises(ValueError, match=msg):
c(halflife=0)
# not valid: alpha <= 0 or alpha > 1
+ msg = "alpha must satisfy: 0 < alpha <= 1"
for alpha in (-0.5, 1.5):
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
c(alpha=alpha)
diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py
index b57467385d371..30d65ebe84a1f 100644
--- a/pandas/tests/window/test_expanding.py
+++ b/pandas/tests/window/test_expanding.py
@@ -28,9 +28,12 @@ def test_constructor(which):
# not valid
for w in [2.0, "foo", np.array([2])]:
- with pytest.raises(ValueError):
+ msg = "min_periods must be an integer"
+ with pytest.raises(ValueError, match=msg):
c(min_periods=w)
- with pytest.raises(ValueError):
+
+ msg = "center must be a boolean"
+ with pytest.raises(ValueError, match=msg):
c(min_periods=1, center=w)
diff --git a/pandas/tests/window/test_timeseries_window.py b/pandas/tests/window/test_timeseries_window.py
index 0c5289cd78fed..8aa4d7103e48a 100644
--- a/pandas/tests/window/test_timeseries_window.py
+++ b/pandas/tests/window/test_timeseries_window.py
@@ -55,28 +55,35 @@ def test_valid(self):
df = self.regular
# not a valid freq
- with pytest.raises(ValueError):
+ msg = "passed window foobar is not compatible with a datetimelike index"
+ with pytest.raises(ValueError, match=msg):
df.rolling(window="foobar")
-
# not a datetimelike index
- with pytest.raises(ValueError):
+ msg = "window must be an integer"
+ with pytest.raises(ValueError, match=msg):
df.reset_index().rolling(window="foobar")
# non-fixed freqs
+ msg = "\\<2 \\* MonthBegins\\> is a non-fixed frequency"
for freq in ["2MS", offsets.MonthBegin(2)]:
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
df.rolling(window=freq)
for freq in ["1D", offsets.Day(2), "2ms"]:
df.rolling(window=freq)
# non-integer min_periods
+ msg = (
+ r"local variable 'minp' referenced before assignment|"
+ "min_periods must be an integer"
+ )
for minp in [1.0, "foo", np.array([1, 2, 3])]:
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match=msg):
df.rolling(window="1D", min_periods=minp)
# center is not implemented
- with pytest.raises(NotImplementedError):
+ msg = "center is not implemented for datetimelike and offset based windows"
+ with pytest.raises(NotImplementedError, match=msg):
df.rolling(window="1D", center=True)
def test_on(self):
@@ -84,7 +91,11 @@ def test_on(self):
df = self.regular
# not a valid column
- with pytest.raises(ValueError):
+ msg = (
+ r"invalid on specified as foobar, must be a column "
+ "\\(of DataFrame\\), an Index or None"
+ )
+ with pytest.raises(ValueError, match=msg):
df.rolling(window="2s", on="foobar")
# column is valid
@@ -93,7 +104,8 @@ def test_on(self):
df.rolling(window="2d", on="C").sum()
# invalid columns
- with pytest.raises(ValueError):
+ msg = "window must be an integer"
+ with pytest.raises(ValueError, match=msg):
df.rolling(window="2d", on="B")
# ok even though on non-selected
@@ -125,11 +137,17 @@ def test_non_monotonic_on(self):
assert not df.index.is_monotonic
- with pytest.raises(ValueError):
+ msg = "index must be monotonic"
+ with pytest.raises(ValueError, match=msg):
df.rolling("2s").sum()
df = df.reset_index()
- with pytest.raises(ValueError):
+
+ msg = (
+ r"invalid on specified as A, must be a column "
+ "\\(of DataFrame\\), an Index or None"
+ )
+ with pytest.raises(ValueError, match=msg):
df.rolling("2s", on="A").sum()
def test_frame_on(self):
@@ -254,7 +272,8 @@ def test_closed(self):
)
# closed must be 'right', 'left', 'both', 'neither'
- with pytest.raises(ValueError):
+ msg = "closed must be 'right', 'left', 'both' or 'neither'"
+ with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="2s", closed="blabla")
expected = df.copy()
| - [ ] xref #30999
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
I've made changes for:
- tests/window/moments/test_moments_rolling.py
- tests/window/test_dtypes.py
- tests/window/test_ewm.py
- tests/window/test_expanding.py
- tests/window/test_timeseries_windows.py
In a couple of places there were comments stating the raised error's output. The `msg` variable contains the same information. Should I remove these coments?
Belongs to issue: https://github.com/pandas-dev/pandas/issues/30999 | https://api.github.com/repos/pandas-dev/pandas/pulls/34907 | 2020-06-20T15:43:54Z | 2020-06-23T19:28:53Z | 2020-06-23T19:28:53Z | 2020-06-23T19:29:03Z |
BUG: Groupby with as_index=True causes incorrect summarization | diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 6f19ec40c2520..e693962e57ac3 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -85,6 +85,24 @@ def test_max_min_non_numeric():
assert "ss" in result
+def test_min_date_with_nans():
+ # GH26321
+ dates = pd.to_datetime(
+ pd.Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
+ ).dt.date
+ df = pd.DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})
+
+ result = df.groupby("b", as_index=False)["c"].min()["c"]
+ expected = pd.to_datetime(
+ pd.Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
+ ).dt.date
+ tm.assert_series_equal(result, expected)
+
+ result = df.groupby("b")["c"].min()
+ expected.index.name = "b"
+ tm.assert_series_equal(result, expected)
+
+
def test_intercept_builtin_sum():
s = Series([1.0, 2.0, np.nan, 3.0])
grouped = s.groupby([0, 1, 2, 2])
| - [x] closes #26321
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/34906 | 2020-06-20T15:40:14Z | 2020-07-16T16:08:55Z | 2020-07-16T16:08:55Z | 2020-07-16T18:02:54Z |
BUG: plotting layout patch | diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index f6ad3a800283d..90d90bc4cd445 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -1006,6 +1006,7 @@ Plotting
- Bug in :meth:`DataFrame.hist` where the order of ``column`` argument was ignored (:issue:`29235`)
- Bug in :meth:`DataFrame.plot.scatter` that when adding multiple plots with different ``cmap``, colorbars alway use the first ``cmap`` (:issue:`33389`)
- Bug in :meth:`DataFrame.plot.scatter` was adding a colorbar to the plot even if the argument `c` was assigned to a column containing color names (:issue:`34316`)
+- Bug in :meth:`pandas.plotting.bootstrap_plot` was causing cluttered axes and overlapping labels (:issue:`34905`)
Groupby/resample/rolling
^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/plotting/_matplotlib/misc.py b/pandas/plotting/_matplotlib/misc.py
index 0cafcfed38a54..bb6530b0f6412 100644
--- a/pandas/plotting/_matplotlib/misc.py
+++ b/pandas/plotting/_matplotlib/misc.py
@@ -301,6 +301,7 @@ def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
for axis in axes:
plt.setp(axis.get_xticklabels(), fontsize=8)
plt.setp(axis.get_yticklabels(), fontsize=8)
+ plt.tight_layout()
return fig
| This fixes a layout problem.
| https://api.github.com/repos/pandas-dev/pandas/pulls/34905 | 2020-06-20T15:32:42Z | 2020-06-23T22:36:21Z | 2020-06-23T22:36:21Z | 2020-06-23T22:36:25Z |
TST:add test for df replace GH34871 | diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index 3bcc26e85e347..49cc892aa00d7 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -1403,3 +1403,16 @@ def test_replace_with_duplicate_columns(self, replacement):
result["B"] = result["B"].replace(7, replacement)
tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.xfail(
+ reason="replace() changes dtype from period to object, see GH34871", strict=True
+ )
+ def test_replace_period_ignore_float(self):
+ """
+ Regression test for GH#34871: if df.replace(1.0, 0.0) is called on a df
+ with a Period column the old, faulty behavior is to raise TypeError.
+ """
+ df = pd.DataFrame({"Per": [pd.Period("2020-01")] * 3})
+ result = df.replace(1.0, 0.0)
+ expected = pd.DataFrame({"Per": [pd.Period("2020-01")] * 3})
+ tm.assert_frame_equal(expected, result)
| - [x] improves upon #34871
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry <---- is that helpful for this specific regression test? happy to write this if you think this helps! :)
add tiny regression test for not failing on calling df.replace() with Period column
working on this revealed other quirks of df.replace(): it now does not raise an exception anymore, and the dataframe returned by replace() has the right values. However, the dtype of a Period column changes to Object, which is surprising.
I've added the observations to GH34871 | https://api.github.com/repos/pandas-dev/pandas/pulls/34904 | 2020-06-20T14:53:44Z | 2020-06-24T15:59:24Z | 2020-06-24T15:59:24Z | 2020-06-26T06:06:33Z |
rework Series.sort is_view check on underlying ndarray before inplace sort (GH5856) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 425f6dfe36990..c8e5d700ac4a0 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -56,6 +56,9 @@ New features
API Changes
~~~~~~~~~~~
+ - ``Series.sort`` will raise a ``ValueError`` (rather than a ``TypeError``) on sorting an
+ object that is a view of another (:issue:`5856`, :issue:`5853`)
+
.. _release.bug_fixes-0.13.1:
Experimental Features
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 5544cd0b34e3c..0f49c976d00a3 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1604,10 +1604,15 @@ def _ixs(self, i, axis=0, copy=False):
values = self._data.iget(i)
if not len(values):
values = np.array([np.nan] * len(self.index), dtype=object)
- return self._constructor_sliced.from_array(
+ result = self._constructor_sliced.from_array(
values, index=self.index,
name=label, fastpath=True)
+ # this is a cached value, mark it so
+ result._set_as_cached(i, self)
+
+ return result
+
def iget_value(self, i, j):
return self.iat[i, j]
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 61235862534f0..92539e7deb5d7 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -983,9 +983,14 @@ def _get_item_cache(self, item):
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
- res._cacher = (item, weakref.ref(self))
+ res._set_as_cached(item, self)
return res
+ def _set_as_cached(self, item, cacher):
+ """ set the _cacher attribute on the calling object with
+ a weakref to cacher """
+ self._cacher = (item, weakref.ref(cacher))
+
def _box_item_values(self, key, values):
raise NotImplementedError
@@ -994,6 +999,12 @@ def _maybe_cache_changed(self, item, value):
maybe it has changed """
self._data.set(item, value)
+ @property
+ def _is_cached(self):
+ """ boolean : return if I am cached """
+ cacher = getattr(self, '_cacher', None)
+ return cacher is not None
+
def _maybe_update_cacher(self, clear=False):
""" see if we need to update our parent cacher
if clear, then clear our cache """
diff --git a/pandas/core/series.py b/pandas/core/series.py
index f147eb87d7480..c310358ab58f9 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1664,20 +1664,16 @@ def sort(self, axis=0, kind='quicksort', order=None, ascending=True):
--------
pandas.Series.order
"""
- sortedSeries = self.order(na_last=True, kind=kind,
- ascending=ascending)
- true_base = self.values
- while true_base.base is not None:
- true_base = true_base.base
+ # GH 5856/5863
+ if self._is_cached:
+ raise ValueError("This Series is a view of some other array, to "
+ "sort in-place you must create a copy")
- if (true_base is not None and
- (true_base.ndim != 1 or true_base.shape != self.shape)):
- raise TypeError('This Series is a view of some other array, to '
- 'sort in-place you must create a copy')
+ result = self.order(na_last=True, kind=kind,
+ ascending=ascending)
- self._data = sortedSeries._data.copy()
- self.index = sortedSeries.index
+ self._update_inplace(result)
def sort_index(self, ascending=True):
"""
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index ef6990337bbbb..bded2fad36763 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -9547,7 +9547,7 @@ def test_sort_datetimes(self):
def test_frame_column_inplace_sort_exception(self):
s = self.frame['A']
- with assertRaisesRegexp(TypeError, "This Series is a view"):
+ with assertRaisesRegexp(ValueError, "This Series is a view"):
s.sort()
cp = s.copy()
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index c48b4b84698b6..a5270fbbecf00 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -2019,6 +2019,20 @@ def f():
zed['eyes']['right'].fillna(value=555, inplace=True)
self.assertRaises(com.SettingWithCopyError, f)
+ # GH 5856/5863
+ # Series.sort operating on a view
+ df = DataFrame(np.random.randn(10,4))
+ s = df.iloc[:,0]
+ def f():
+ s.sort()
+ self.assertRaises(ValueError, f)
+
+ df = DataFrame(np.random.randn(10,4))
+ s = df.iloc[:,0]
+ s = s.order()
+ assert_series_equal(s,df.iloc[:,0].order())
+ assert_series_equal(s,df[0].order())
+
pd.set_option('chained_assignment','warn')
def test_float64index_slicing_bug(self):
| closes #5856
related #5853
DataFrame._ixs will properly record a cache change (similar to _get_item_cache)
| https://api.github.com/repos/pandas-dev/pandas/pulls/5859 | 2014-01-05T15:26:43Z | 2014-01-06T12:25:15Z | 2014-01-06T12:25:15Z | 2014-06-25T17:54:20Z |
ENH: Refactor code to add is_view method for Series. | diff --git a/doc/source/v0.13.1.txt b/doc/source/v0.13.1.txt
index 250adffdadbca..0ea6b161107e7 100644
--- a/doc/source/v0.13.1.txt
+++ b/doc/source/v0.13.1.txt
@@ -29,6 +29,8 @@ Deprecations
Enhancements
~~~~~~~~~~~~
+Added an ``is_view`` method to Series.
+
Experimental
~~~~~~~~~~~~
diff --git a/pandas/core/series.py b/pandas/core/series.py
index f147eb87d7480..ec4fd3aa18b94 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1645,6 +1645,20 @@ def update(self, other):
#----------------------------------------------------------------------
# Reindexing, sorting
+ def is_view(self):
+ """
+ Return True if series is a view of some other array, False otherwise.
+ """
+ true_base = self.values
+ while true_base.base is not None:
+ true_base = true_base.base
+
+ if (true_base is not None and
+ (true_base.ndim != 1 or true_base.shape != self.shape)):
+ return True
+ return False
+
+
def sort(self, axis=0, kind='quicksort', order=None, ascending=True):
"""
Sort values and index labels by value, in place. For compatibility with
@@ -1667,12 +1681,7 @@ def sort(self, axis=0, kind='quicksort', order=None, ascending=True):
sortedSeries = self.order(na_last=True, kind=kind,
ascending=ascending)
- true_base = self.values
- while true_base.base is not None:
- true_base = true_base.base
-
- if (true_base is not None and
- (true_base.ndim != 1 or true_base.shape != self.shape)):
+ if self.is_view():
raise TypeError('This Series is a view of some other array, to '
'sort in-place you must create a copy')
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index e8b421608fc0a..281e88b7bdba4 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -5548,6 +5548,13 @@ def test_unique_data_ownership(self):
# it works! #1807
Series(Series(["a", "c", "b"]).unique()).sort()
+def test_is_view():
+ df = tm.makeDataFrame()
+ view = df['A'].is_view()
+ tm.assert_equal(view, True)
+ ser = tm.makeStringSeries()
+ view = ser.is_view()
+ tm.assert_equal(view, False)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
| So you can check if a series is a view instead of waiting to get an error on sort or copying by default.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5853 | 2014-01-05T03:37:50Z | 2014-01-05T04:30:22Z | 2014-01-05T04:30:22Z | 2014-06-18T19:59:46Z |
ENH: enhancements to Panel.apply to enable arbitrary functions and multi-dim slicing (GH1148) | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 4ecde7e05256a..79f5af74c3985 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -785,6 +785,7 @@ Attributes and underlying data
Panel.axes
Panel.ndim
Panel.shape
+ Panel.dtypes
Conversion
~~~~~~~~~~
@@ -1122,7 +1123,7 @@ Indexing, iteration
~~~~~~~~~~~~~~~~~~~
.. autosummary::
:toctree: generated/
-
+
GroupBy.__iter__
GroupBy.groups
GroupBy.indices
@@ -1141,7 +1142,7 @@ Computations / Descriptive Stats
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autosummary::
:toctree: generated/
-
+
GroupBy.mean
GroupBy.median
GroupBy.std
@@ -1155,7 +1156,7 @@ Computations / Descriptive Stats
.. toctree::
:hidden:
-
+
generated/pandas.core.common.isnull
generated/pandas.core.common.notnull
generated/pandas.core.reshape.get_dummies
diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index eef271be74a02..bd2980c2f1c9f 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -637,6 +637,81 @@ to :ref:`merging/joining functionality <merging>`:
s
s.map(t)
+
+.. _basics.apply_panel:
+
+Applying with a Panel
+~~~~~~~~~~~~~~~~~~~~~
+
+Applying with a ``Panel`` will pass a ``Series`` to the applied function. If the applied
+function returns a ``Series``, the result of the application will be a ``Panel``. If the applied function
+reduces to a scalar, the result of the application will be a ``DataFrame``.
+
+.. note::
+
+ Prior to 0.13.1 ``apply`` on a ``Panel`` would only work on ``ufuncs`` (e.g. ``np.sum/np.max``).
+
+.. ipython:: python
+
+ import pandas.util.testing as tm
+ panel = tm.makePanel(5)
+ panel
+ panel['ItemA']
+
+A transformational apply.
+
+.. ipython:: python
+
+ result = panel.apply(lambda x: x*2, axis='items')
+ result
+ result['ItemA']
+
+A reduction operation.
+
+.. ipython:: python
+
+ panel.apply(lambda x: x.dtype, axis='items')
+
+A similar reduction type operation
+
+.. ipython:: python
+
+ panel.apply(lambda x: x.sum(), axis='major_axis')
+
+This last reduction is equivalent to
+
+.. ipython:: python
+
+ panel.sum('major_axis')
+
+A transformation operation that returns a ``Panel``, but is computing
+the z-score across the ``major_axis``.
+
+.. ipython:: python
+
+ result = panel.apply(lambda x: (x-x.mean())/x.std(), axis='major_axis')
+ result
+ result['ItemA']
+
+Apply can also accept multiple axes in the ``axis`` argument. This will pass a
+``DataFrame`` of the cross-section to the applied function.
+
+.. ipython:: python
+
+ f = lambda x: (x-x.mean(1)/x.std(1))
+
+ result = panel.apply(f, axis = ['items','major_axis'])
+ result
+ result.loc[:,:,'ItemA']
+
+This is equivalent to the following
+
+.. ipython:: python
+
+ result = Panel(dict([ (ax,f(panel.loc[:,:,ax])) for ax in panel.minor_axis ]))
+ result
+ result.loc[:,:,'ItemA']
+
.. _basics.reindexing:
Reindexing and altering labels
@@ -1066,7 +1141,7 @@ or match a pattern:
Series(['1', '2', '3a', '3b', '03c']).str.match(pattern, as_indexer=True)
-The distinction between ``match`` and ``contains`` is strictness: ``match``
+The distinction between ``match`` and ``contains`` is strictness: ``match``
relies on strict ``re.match``, while ``contains`` relies on ``re.search``.
.. warning::
@@ -1078,7 +1153,7 @@ relies on strict ``re.match``, while ``contains`` relies on ``re.search``.
This old, deprecated behavior of ``match`` is still the default. As
demonstrated above, use the new behavior by setting ``as_indexer=True``.
In this mode, ``match`` is analagous to ``contains``, returning a boolean
- Series. The new behavior will become the default behavior in a future
+ Series. The new behavior will become the default behavior in a future
release.
Methods like ``match``, ``contains``, ``startswith``, and ``endswith`` take
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 9f0b42dd5b741..fc9f18279087b 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -73,6 +73,9 @@ Improvements to existing features
- df.info() view now display dtype info per column (:issue: `5682`)
- perf improvements in DataFrame ``count/dropna`` for ``axis=1``
- Series.str.contains now has a `regex=False` keyword which can be faster for plain (non-regex) string patterns. (:issue: `5879`)
+ - support ``dtypes`` on ``Panel``
+ - extend ``Panel.apply`` to allow arbitrary functions (rather than only ufuncs) (:issue:`1148`)
+ allow multiple axes to be used to operate on slabs of a ``Panel``
.. _release.bug_fixes-0.13.1:
diff --git a/doc/source/v0.13.1.txt b/doc/source/v0.13.1.txt
index 250adffdadbca..76b915c519440 100644
--- a/doc/source/v0.13.1.txt
+++ b/doc/source/v0.13.1.txt
@@ -29,6 +29,60 @@ Deprecations
Enhancements
~~~~~~~~~~~~
+- ``Panel.apply`` will work on non-ufuncs. See :ref:`the docs<basics.apply_panel>`.
+
+ .. ipython:: python
+
+ import pandas.util.testing as tm
+ panel = tm.makePanel(5)
+ panel
+ panel['ItemA']
+
+ Specifying an ``apply`` that operates on a Series (to return a single element)
+
+ .. ipython:: python
+
+ panel.apply(lambda x: x.dtype, axis='items')
+
+ A similar reduction type operation
+
+ .. ipython:: python
+
+ panel.apply(lambda x: x.sum(), axis='major_axis')
+
+ This is equivalent to
+
+ .. ipython:: python
+
+ panel.sum('major_axis')
+
+ A transformation operation that returns a Panel, but is computing
+ the z-score across the major_axis
+
+ .. ipython:: python
+
+ result = panel.apply(lambda x: (x-x.mean())/x.std(), axis='major_axis')
+ result
+ result['ItemA']
+
+- ``Panel.apply`` operating on cross-sectional slabs. (:issue:`1148`)
+
+ .. ipython:: python
+
+ f = lambda x: (x-x.mean(1)/x.std(1))
+
+ result = panel.apply(f, axis = ['items','major_axis'])
+ result
+ result.loc[:,:,'ItemA']
+
+ This is equivalent to the following
+
+ .. ipython:: python
+
+ result = Panel(dict([ (ax,f(panel.loc[:,:,ax])) for ax in panel.minor_axis ]))
+ result
+ result.loc[:,:,'ItemA']
+
Experimental
~~~~~~~~~~~~
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index b6cd643f47c5a..8c50396c503a0 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -17,8 +17,10 @@
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
+from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
+from pandas.tools.util import cartesian_product
from pandas import compat
from pandas.util.decorators import deprecate, Appender, Substitution
import pandas.core.common as com
@@ -333,26 +335,34 @@ def axis_pretty(a):
[class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])
return output
- def _get_plane_axes(self, axis):
+ def _get_plane_axes_index(self, axis):
"""
- Get my plane axes: these are already
+ Get my plane axes indexes: these are already
(as compared with higher level planes),
- as we are returning a DataFrame axes
+ as we are returning a DataFrame axes indexes
"""
- axis = self._get_axis_name(axis)
+ axis_name = self._get_axis_name(axis)
- if axis == 'major_axis':
- index = self.minor_axis
- columns = self.items
- if axis == 'minor_axis':
- index = self.major_axis
- columns = self.items
- elif axis == 'items':
- index = self.major_axis
- columns = self.minor_axis
+ if axis_name == 'major_axis':
+ index = 'minor_axis'
+ columns = 'items'
+ if axis_name == 'minor_axis':
+ index = 'major_axis'
+ columns = 'items'
+ elif axis_name == 'items':
+ index = 'major_axis'
+ columns = 'minor_axis'
return index, columns
+ def _get_plane_axes(self, axis):
+ """
+ Get my plane axes indexes: these are already
+ (as compared with higher level planes),
+ as we are returning a DataFrame axes
+ """
+ return [ self._get_axis(axi) for axi in self._get_plane_axes_index(axis) ]
+
fromDict = from_dict
def to_sparse(self, fill_value=None, kind='block'):
@@ -431,6 +441,10 @@ def as_matrix(self):
self._consolidate_inplace()
return self._data.as_matrix()
+ @property
+ def dtypes(self):
+ return self.apply(lambda x: x.dtype, axis='items')
+
#----------------------------------------------------------------------
# Getting and setting elements
@@ -827,25 +841,138 @@ def to_frame(self, filter_observations=True):
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
- def apply(self, func, axis='major'):
+ def apply(self, func, axis='major', **kwargs):
"""
- Apply
+ Applies function along input axis of the Panel
Parameters
----------
- func : numpy function
- Signature should match numpy.{sum, mean, var, std} etc.
+ func : function
+ Function to apply to each combination of 'other' axes
+ e.g. if axis = 'items', then the combination of major_axis/minor_axis
+ will be passed a Series
axis : {'major', 'minor', 'items'}
- fill_value : boolean, default True
- Replace NaN values with specified first
+ Additional keyword arguments will be passed as keywords to the function
+
+ Examples
+ --------
+ >>> p.apply(numpy.sqrt) # returns a Panel
+ >>> p.apply(lambda x: x.sum(), axis=0) # equiv to p.sum(0)
+ >>> p.apply(lambda x: x.sum(), axis=1) # equiv to p.sum(1)
+ >>> p.apply(lambda x: x.sum(), axis=2) # equiv to p.sum(2)
Returns
-------
- result : DataFrame or Panel
+ result : Pandas Object
"""
- i = self._get_axis_number(axis)
- result = np.apply_along_axis(func, i, self.values)
- return self._wrap_result(result, axis=axis)
+
+ if kwargs and not isinstance(func, np.ufunc):
+ f = lambda x: func(x, **kwargs)
+ else:
+ f = func
+
+ # 2d-slabs
+ if isinstance(axis, (tuple,list)) and len(axis) == 2:
+ return self._apply_2d(f, axis=axis)
+
+ axis = self._get_axis_number(axis)
+
+ # try ufunc like
+ if isinstance(f, np.ufunc):
+ try:
+ result = np.apply_along_axis(func, axis, self.values)
+ return self._wrap_result(result, axis=axis)
+ except (AttributeError):
+ pass
+
+ # 1d
+ return self._apply_1d(f, axis=axis)
+
+ def _apply_1d(self, func, axis):
+
+ axis_name = self._get_axis_name(axis)
+ ax = self._get_axis(axis)
+ ndim = self.ndim
+ values = self.values
+
+ # iter thru the axes
+ slice_axis = self._get_axis(axis)
+ slice_indexer = [0]*(ndim-1)
+ indexer = np.zeros(ndim, 'O')
+ indlist = list(range(ndim))
+ indlist.remove(axis)
+ indexer[axis] = slice(None, None)
+ indexer.put(indlist, slice_indexer)
+ planes = [ self._get_axis(axi) for axi in indlist ]
+ shape = np.array(self.shape).take(indlist)
+
+ # all the iteration points
+ points = cartesian_product(planes)
+
+ results = []
+ for i in range(np.prod(shape)):
+
+ # construct the object
+ pts = tuple([ p[i] for p in points ])
+ indexer.put(indlist, slice_indexer)
+
+ obj = Series(values[tuple(indexer)],index=slice_axis,name=pts)
+ result = func(obj)
+
+ results.append(result)
+
+ # increment the indexer
+ slice_indexer[-1] += 1
+ n = -1
+ while (slice_indexer[n] >= shape[n]) and (n > (1-ndim)):
+ slice_indexer[n-1] += 1
+ slice_indexer[n] = 0
+ n -= 1
+
+ # empty object
+ if not len(results):
+ return self._constructor(**self._construct_axes_dict())
+
+ # same ndim as current
+ if isinstance(results[0],Series):
+ arr = np.vstack([ r.values for r in results ])
+ arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))
+ tranp = np.array([axis]+indlist).argsort()
+ arr = arr.transpose(tuple(list(tranp)))
+ return self._constructor(arr,**self._construct_axes_dict())
+
+ # ndim-1 shape
+ results = np.array(results).reshape(shape)
+ if results.ndim == 2 and axis_name != self._info_axis_name:
+ results = results.T
+ planes = planes[::-1]
+ return self._construct_return_type(results,planes)
+
+ def _apply_2d(self, func, axis):
+ """ handle 2-d slices, equiv to iterating over the other axis """
+
+ ndim = self.ndim
+ axis = [ self._get_axis_number(a) for a in axis ]
+
+ # construct slabs, in 2-d this is a DataFrame result
+ indexer_axis = list(range(ndim))
+ for a in axis:
+ indexer_axis.remove(a)
+ indexer_axis = indexer_axis[0]
+
+ slicer = [ slice(None,None) ] * ndim
+ ax = self._get_axis(indexer_axis)
+
+ results = []
+ for i, e in enumerate(ax):
+
+ slicer[indexer_axis] = i
+ sliced = self.iloc[tuple(slicer)]
+
+ obj = func(sliced)
+ results.append((e,obj))
+
+ return self._construct_return_type(dict(results))
def _reduce(self, op, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
@@ -863,13 +990,33 @@ def _reduce(self, op, axis=0, skipna=True, numeric_only=None,
def _construct_return_type(self, result, axes=None, **kwargs):
""" return the type for the ndim of the result """
- ndim = result.ndim
- if self.ndim == ndim:
+ ndim = getattr(result,'ndim',None)
+
+ # need to assume they are the same
+ if ndim is None:
+ if isinstance(result,dict):
+ ndim = getattr(list(compat.itervalues(result))[0],'ndim',None)
+
+ # a saclar result
+ if ndim is None:
+ ndim = 0
+
+ # have a dict, so top-level is +1 dim
+ else:
+ ndim += 1
+
+ # scalar
+ if ndim == 0:
+ return Series(result)
+
+ # same as self
+ elif self.ndim == ndim:
""" return the construction dictionary for these axes """
if axes is None:
return self._constructor(result)
return self._constructor(result, **self._construct_axes_dict())
+ # sliced
elif self.ndim == ndim + 1:
if axes is None:
return self._constructor_sliced(result)
@@ -877,7 +1024,7 @@ def _construct_return_type(self, result, axes=None, **kwargs):
result, **self._extract_axes_for_slice(self, axes))
raise PandasError('invalid _construct_return_type [self->%s] '
- '[result->%s]' % (self.ndim, result.ndim))
+ '[result->%s]' % (self, result))
def _wrap_result(self, result, axis):
axis = self._get_axis_name(axis)
diff --git a/pandas/core/panelnd.py b/pandas/core/panelnd.py
index a7cfe49484d24..3eebd51190e3d 100644
--- a/pandas/core/panelnd.py
+++ b/pandas/core/panelnd.py
@@ -56,9 +56,10 @@ def __init__(self, *args, **kwargs):
self._init_data(*args, **kwargs)
klass.__init__ = __init__
- def _get_plane_axes(self, axis):
+ def _get_plane_axes_index(self, axis):
+ """ return the sliced index for this object """
- axis = self._get_axis_name(axis)
+ axis_name = self._get_axis_name(axis)
index = self._AXIS_ORDERS.index(axis)
planes = []
@@ -67,8 +68,8 @@ def _get_plane_axes(self, axis):
if index != self._AXIS_LEN:
planes.extend(self._AXIS_ORDERS[index + 1:])
- return [getattr(self, p) for p in planes]
- klass._get_plane_axes = _get_plane_axes
+ return planes
+ klass._get_plane_axes_index = _get_plane_axes_index
def _combine(self, other, func, axis=0):
if isinstance(other, klass):
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 30500ac57a7f6..08d3afe63ec86 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -1061,6 +1061,105 @@ def test_convert_objects(self):
result = p.convert_objects(convert_numeric='force')
assert_panel_equal(result, expected)
+ def test_dtypes(self):
+
+ result = self.panel.dtypes
+ expected = DataFrame(np.dtype('float64'),index=self.panel.major_axis,columns=self.panel.minor_axis)
+ assert_frame_equal(result, expected)
+
+ def test_apply(self):
+ # GH1148
+
+ from pandas import Series,DataFrame
+
+ # ufunc
+ applied = self.panel.apply(np.sqrt)
+ self.assert_(assert_almost_equal(applied.values,
+ np.sqrt(self.panel.values)))
+
+ # ufunc same shape
+ result = self.panel.apply(lambda x: x*2, axis='items')
+ expected = self.panel*2
+ assert_panel_equal(result, expected)
+ result = self.panel.apply(lambda x: x*2, axis='major_axis')
+ expected = self.panel*2
+ assert_panel_equal(result, expected)
+ result = self.panel.apply(lambda x: x*2, axis='minor_axis')
+ expected = self.panel*2
+ assert_panel_equal(result, expected)
+
+ # reduction to DataFrame
+ result = self.panel.apply(lambda x: x.dtype, axis='items')
+ expected = DataFrame(np.dtype('float64'),index=self.panel.major_axis,columns=self.panel.minor_axis)
+ assert_frame_equal(result,expected)
+ result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
+ expected = DataFrame(np.dtype('float64'),index=self.panel.minor_axis,columns=self.panel.items)
+ assert_frame_equal(result,expected)
+ result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
+ expected = DataFrame(np.dtype('float64'),index=self.panel.major_axis,columns=self.panel.items)
+ assert_frame_equal(result,expected)
+
+ # reductions via other dims
+ expected = self.panel.sum(0)
+ result = self.panel.apply(lambda x: x.sum(), axis='items')
+ assert_frame_equal(result,expected)
+ expected = self.panel.sum(1)
+ result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
+ assert_frame_equal(result,expected)
+ expected = self.panel.sum(2)
+ result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
+ assert_frame_equal(result,expected)
+
+ # pass kwargs
+ result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
+ expected = self.panel.sum(0) + 5
+ assert_frame_equal(result,expected)
+
+ def test_apply_slabs(self):
+
+ # same shape as original
+ result = self.panel.apply(lambda x: x*2, axis = ['items','major_axis'])
+ expected = (self.panel*2).transpose('minor_axis','major_axis','items')
+ assert_panel_equal(result,expected)
+ result = self.panel.apply(lambda x: x*2, axis = ['major_axis','items'])
+ assert_panel_equal(result,expected)
+
+ result = self.panel.apply(lambda x: x*2, axis = ['items','minor_axis'])
+ expected = (self.panel*2).transpose('major_axis','minor_axis','items')
+ assert_panel_equal(result,expected)
+ result = self.panel.apply(lambda x: x*2, axis = ['minor_axis','items'])
+ assert_panel_equal(result,expected)
+
+ result = self.panel.apply(lambda x: x*2, axis = ['major_axis','minor_axis'])
+ expected = self.panel*2
+ assert_panel_equal(result,expected)
+ result = self.panel.apply(lambda x: x*2, axis = ['minor_axis','major_axis'])
+ assert_panel_equal(result,expected)
+
+ # reductions
+ result = self.panel.apply(lambda x: x.sum(0), axis = ['items','major_axis'])
+ expected = self.panel.sum(1).T
+ assert_frame_equal(result,expected)
+
+ result = self.panel.apply(lambda x: x.sum(1), axis = ['items','major_axis'])
+ expected = self.panel.sum(0)
+ assert_frame_equal(result,expected)
+
+ # transforms
+ f = lambda x: (x-x.mean(1)/x.std(1))
+
+ result = self.panel.apply(f, axis = ['items','major_axis'])
+ expected = Panel(dict([ (ax,f(self.panel.loc[:,:,ax])) for ax in self.panel.minor_axis ]))
+ assert_panel_equal(result,expected)
+
+ result = self.panel.apply(f, axis = ['major_axis','minor_axis'])
+ expected = Panel(dict([ (ax,f(self.panel.loc[ax])) for ax in self.panel.items ]))
+ assert_panel_equal(result,expected)
+
+ result = self.panel.apply(f, axis = ['minor_axis','items'])
+ expected = Panel(dict([ (ax,f(self.panel.loc[:,ax])) for ax in self.panel.major_axis ]))
+ assert_panel_equal(result,expected)
+
def test_reindex(self):
ref = self.panel['ItemB']
@@ -1989,12 +2088,6 @@ def test_get_dummies(self):
dummies = get_dummies(self.panel['Label'])
self.assert_(np.array_equal(dummies.values, minor_dummies.values))
- def test_apply(self):
- # ufunc
- applied = self.panel.apply(np.sqrt)
- self.assert_(assert_almost_equal(applied.values,
- np.sqrt(self.panel.values)))
-
def test_mean(self):
means = self.panel.mean(level='minor')
| closes #1148
A reproduction of the new docs section
Applying with a Panel will pass a Series to the applied function. If the applied function returns a Series, the result of the application will be a Panel. If the applied function reduces to a scalar, the result of the application will be a DataFrame.
Note Prior to 0.13.1 apply on a Panel would only work on ufuncs (e.g. np.sum/np.max).
```
In [120]: import pandas.util.testing as tm
In [121]: panel = tm.makePanel(5)
In [122]: panel
<class 'pandas.core.panel.Panel'>
Dimensions: 3 (items) x 5 (major_axis) x 4 (minor_axis)
Items axis: ItemA to ItemC
Major_axis axis: 2000-01-03 00:00:00 to 2000-01-07 00:00:00
Minor_axis axis: A to D
In [123]: panel['ItemA']
A B C D
2000-01-03 0.166882 -0.597361 -1.200639 0.174260
2000-01-04 -1.759496 -1.514940 -1.872993 -0.581163
2000-01-05 0.901336 -1.640398 0.825210 0.087916
2000-01-06 -0.317478 -1.130643 -0.392715 0.416971
2000-01-07 -0.681335 -0.245890 -1.994150 0.666084
[5 rows x 4 columns]
```
A transformational apply.
```
In [124]: result = panel.apply(lambda x: x*2, axis='items')
In [125]: result
<class 'pandas.core.panel.Panel'>
Dimensions: 3 (items) x 5 (major_axis) x 4 (minor_axis)
Items axis: ItemA to ItemC
Major_axis axis: 2000-01-03 00:00:00 to 2000-01-07 00:00:00
Minor_axis axis: A to D
In [126]: result['ItemA']
A B C D
2000-01-03 0.333764 -1.194722 -2.401278 0.348520
2000-01-04 -3.518991 -3.029880 -3.745986 -1.162326
2000-01-05 1.802673 -3.280796 1.650421 0.175832
2000-01-06 -0.634955 -2.261286 -0.785430 0.833943
2000-01-07 -1.362670 -0.491779 -3.988300 1.332168
[5 rows x 4 columns]
```
A reduction operation.
```
In [127]: panel.apply(lambda x: x.dtype, axis='items')
A B C D
2000-01-03 float64 float64 float64 float64
2000-01-04 float64 float64 float64 float64
2000-01-05 float64 float64 float64 float64
2000-01-06 float64 float64 float64 float64
2000-01-07 float64 float64 float64 float64
[5 rows x 4 columns]
```
A similar reduction type operation
```
In [128]: panel.apply(lambda x: x.sum(), axis='major_axis')
ItemA ItemB ItemC
A -1.690090 1.840259 0.010754
B -5.129232 0.860182 0.178018
C -4.635286 0.545328 2.456520
D 0.764068 -3.623586 1.761541
[4 rows x 3 columns]
```
This last reduction is equivalent to
```
In [129]: panel.sum('major_axis')
ItemA ItemB ItemC
A -1.690090 1.840259 0.010754
B -5.129232 0.860182 0.178018
C -4.635286 0.545328 2.456520
D 0.764068 -3.623586 1.761541
[4 rows x 3 columns]
```
A transformation operation that returns a Panel, but is computing the z-score across the major_axis.
```
In [130]: result = panel.apply(lambda x: (x-x.mean())/x.std(), axis='major_axis')
In [131]: result
<class 'pandas.core.panel.Panel'>
Dimensions: 3 (items) x 5 (major_axis) x 4 (minor_axis)
Items axis: ItemA to ItemC
Major_axis axis: 2000-01-03 00:00:00 to 2000-01-07 00:00:00
Minor_axis axis: A to D
In [132]: result['ItemA']
A B C D
2000-01-03 0.509389 0.719204 -0.234072 0.045812
2000-01-04 -1.434116 -0.820934 -0.809328 -1.567858
2000-01-05 1.250373 -1.031513 1.499214 -0.138629
2000-01-06 0.020723 -0.175899 0.457175 0.564271
2000-01-07 -0.346370 1.309142 -0.912988 1.096405
[5 rows x 4 columns]
```
Apply can also accept multiple axes in the axis argument. This will pass a DataFrame of the cross-section to the applied function.
```
In [133]: f = lambda x: (x-x.mean(1)/x.std(1))
In [134]: result = panel.apply(f, axis = ['items','major_axis'])
In [135]: result
<class 'pandas.core.panel.Panel'>
Dimensions: 4 (items) x 5 (major_axis) x 3 (minor_axis)
Items axis: A to D
Major_axis axis: 2000-01-03 00:00:00 to 2000-01-07 00:00:00
Minor_axis axis: ItemA to ItemC
In [136]: result.loc[:,:,'ItemA']
A B C D
2000-01-03 0.748886 -0.323319 -1.172352 0.370451
2000-01-04 -1.594544 -1.659365 -1.444732 -0.162764
2000-01-05 0.908832 -1.220236 0.237668 0.754405
2000-01-06 -1.024669 -0.081850 -0.792957 0.641960
2000-01-07 -0.884333 -0.472889 -1.474646 -0.671871
[5 rows x 4 columns]
```
This is equivalent to the following
```
In [137]: result = Panel(dict([ (ax,f(panel.loc[:,:,ax])) for ax in panel.minor_axis ]))
In [138]: result
<class 'pandas.core.panel.Panel'>
Dimensions: 4 (items) x 5 (major_axis) x 3 (minor_axis)
Items axis: A to D
Major_axis axis: 2000-01-03 00:00:00 to 2000-01-07 00:00:00
Minor_axis axis: ItemA to ItemC
In [139]: result.loc[:,:,'ItemA']
A B C D
2000-01-03 0.748886 -0.323319 -1.172352 0.370451
2000-01-04 -1.594544 -1.659365 -1.444732 -0.162764
2000-01-05 0.908832 -1.220236 0.237668 0.754405
2000-01-06 -1.024669 -0.081850 -0.792957 0.641960
2000-01-07 -0.884333 -0.472889 -1.474646 -0.671871
[5 rows x 4 columns]
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/5850 | 2014-01-04T18:03:12Z | 2014-01-15T02:53:46Z | 2014-01-15T02:53:46Z | 2014-06-25T15:23:43Z |
BUG: Bug in selection with missing values via .ix from a duplicate indexed DataFrame failing | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 3d4b5e8facfa4..0150b233110a7 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -73,6 +73,7 @@ Bug Fixes
~~~~~~~~~
- Bug in Series replace with timestamp dict (:issue:`5797`)
- read_csv/read_table now respects the `prefix` kwarg (:issue:`5732`).
+ - Bug in selection with missing values via ``.ix`` from a duplicate indexed DataFrame failing (:issue:`5835`)
pandas 0.13.0
-------------
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index d636edeec0815..7f49f7c1993bd 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -3119,6 +3119,9 @@ def reindex_indexer(self, new_axis, indexer, axis=1, fill_value=None,
if not allow_dups and not self.axes[axis].is_unique:
raise ValueError("cannot reindex from a duplicate axis")
+ if not self.is_consolidated():
+ self = self.consolidate()
+
if axis == 0:
return self._reindex_indexer_items(new_axis, indexer, fill_value)
@@ -3140,38 +3143,62 @@ def _reindex_indexer_items(self, new_items, indexer, fill_value):
new_blocks = []
is_unique = new_items.is_unique
+ # we have duplicates in the items and what we are reindexing
+ if not is_unique and not self.items.is_unique:
+
+ rl = self._set_ref_locs(do_refs='force')
+ for i, idx in enumerate(indexer):
+ item = new_items.take([i])
+ if idx >= 0:
+ blk, lidx = rl[idx]
+ blk = make_block(_block_shape(blk.iget(lidx)), item,
+ new_items, ndim=self.ndim, fastpath=True,
+ placement=[i])
+
+ # a missing value
+ else:
+ blk = self._make_na_block(item,
+ new_items,
+ placement=[i],
+ fill_value=fill_value)
+ new_blocks.append(blk)
+ new_blocks = _consolidate(new_blocks, new_items)
+
+
# keep track of what items aren't found anywhere
- l = np.arange(len(item_order))
- mask = np.zeros(len(item_order), dtype=bool)
- for blk in self.blocks:
- blk_indexer = blk.items.get_indexer(item_order)
- selector = blk_indexer != -1
+ else:
+ l = np.arange(len(item_order))
+ mask = np.zeros(len(item_order), dtype=bool)
- # update with observed items
- mask |= selector
+ for blk in self.blocks:
+ blk_indexer = blk.items.get_indexer(item_order)
+ selector = blk_indexer != -1
+
+ # update with observed items
+ mask |= selector
- if not selector.any():
- continue
+ if not selector.any():
+ continue
- new_block_items = new_items.take(selector.nonzero()[0])
- new_values = com.take_nd(blk.values, blk_indexer[selector], axis=0,
- allow_fill=False)
- placement = l[selector] if not is_unique else None
- new_blocks.append(make_block(new_values,
- new_block_items,
+ new_block_items = new_items.take(selector.nonzero()[0])
+ new_values = com.take_nd(blk.values, blk_indexer[selector], axis=0,
+ allow_fill=False)
+ placement = l[selector] if not is_unique else None
+ new_blocks.append(make_block(new_values,
+ new_block_items,
new_items,
- placement=placement,
- fastpath=True))
-
- if not mask.all():
- na_items = new_items[-mask]
- placement = l[-mask] if not is_unique else None
- na_block = self._make_na_block(na_items,
- new_items,
- placement=placement,
- fill_value=fill_value)
- new_blocks.append(na_block)
- new_blocks = _consolidate(new_blocks, new_items)
+ placement=placement,
+ fastpath=True))
+
+ if not mask.all():
+ na_items = new_items[-mask]
+ placement = l[-mask] if not is_unique else None
+ na_block = self._make_na_block(na_items,
+ new_items,
+ placement=placement,
+ fill_value=fill_value)
+ new_blocks.append(na_block)
+ new_blocks = _consolidate(new_blocks, new_items)
return self.__class__(new_blocks, new_axes)
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index fe3aac0e9eeaa..c48b4b84698b6 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -981,6 +981,14 @@ def test_dups_fancy_indexing(self):
result = df.ix[['A','A','E']]
assert_frame_equal(result, expected)
+ # GH 5835
+ # dups on index and missing values
+ df = DataFrame(np.random.randn(5,5),columns=['A','B','B','B','A'])
+
+ expected = pd.concat([df.ix[:,['A','B']],DataFrame(np.nan,columns=['C'],index=df.index)],axis=1)
+ result = df.ix[:,['A','B','C']]
+ assert_frame_equal(result, expected)
+
def test_indexing_mixed_frame_bug(self):
# GH3492
| closes #5835
| https://api.github.com/repos/pandas-dev/pandas/pulls/5849 | 2014-01-04T17:20:44Z | 2014-01-04T18:53:09Z | 2014-01-04T18:53:09Z | 2014-07-09T13:06:17Z |
BUG: tail raises on empty DataFrame | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 9ab175c07f169..c51651d321cd6 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -90,6 +90,8 @@ Bug Fixes
- Bug in ``BusinessDay`` when adding n days to a date not on offset when n>5 and n%5==0 (:issue:`5890`)
- Bug in assigning to chained series with a series via ix (:issue:`5928`)
- Bug in creating an empty DataFrame, copying, then assigning (:issue:`5932`)
+ - Bug in DataFrame.tail with empty frame (:issue:`5846`)
+ - DataFrame.head(0) returns self instead of empty frame (:issue:`5846`)
pandas 0.13.0
-------------
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e5c5f362d7f58..a5a7021d9f4b7 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1570,8 +1570,12 @@ def head(self, n=5):
Returns first n rows
"""
l = len(self)
- if abs(n) > l:
- n = l if n > 0 else -l
+ if l == 0 or n==0:
+ return self
+ if n > l:
+ n = l
+ elif n < -l:
+ n = -l
return self.iloc[:n]
def tail(self, n=5):
@@ -1579,8 +1583,12 @@ def tail(self, n=5):
Returns last n rows
"""
l = len(self)
- if abs(n) > l:
- n = l if n > 0 else -l
+ if l == 0 or n == 0:
+ return self
+ if n > l:
+ n = l
+ elif n < -l:
+ n = -l
return self.iloc[-n:]
#----------------------------------------------------------------------
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index bded2fad36763..bfa57590e1760 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4259,12 +4259,25 @@ def test_repr_column_name_unicode_truncation_bug(self):
def test_head_tail(self):
assert_frame_equal(self.frame.head(), self.frame[:5])
assert_frame_equal(self.frame.tail(), self.frame[-5:])
-
+ assert_frame_equal(self.frame.head(0), self.frame)
+ assert_frame_equal(self.frame.tail(0), self.frame)
+ assert_frame_equal(self.frame.head(-1), self.frame[:-1])
+ assert_frame_equal(self.frame.tail(-1), self.frame[1:])
+ assert_frame_equal(self.frame.head(1), self.frame[:1])
+ assert_frame_equal(self.frame.tail(1), self.frame[-1:])
# with a float index
df = self.frame.copy()
df.index = np.arange(len(self.frame)) + 0.1
assert_frame_equal(df.head(), df.iloc[:5])
assert_frame_equal(df.tail(), df.iloc[-5:])
+ assert_frame_equal(df.head(0), df)
+ assert_frame_equal(df.tail(0), df)
+ assert_frame_equal(df.head(-1), df.iloc[:-1])
+ assert_frame_equal(df.tail(-1), df.iloc[1:])
+ #test empty dataframe
+ empty_df = DataFrame()
+ assert_frame_equal(empty_df.tail(), empty_df)
+ assert_frame_equal(empty_df.head(), empty_df)
def test_insert(self):
df = DataFrame(np.random.randn(5, 3), index=np.arange(5),
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 97e25f105db70..14082486f80a0 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -338,7 +338,7 @@ def test_head_tail(self):
self._compare(o.tail(), o.iloc[-5:])
# 0-len
- self._compare(o.head(0), o.iloc[:0])
+ self._compare(o.head(0), o.iloc[:])
self._compare(o.tail(0), o.iloc[0:])
# bounded
| closes #5846
calling tail on an empty frame threw an exception. In addition, df.tail(0) returns self without indexing.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5848 | 2014-01-04T16:18:26Z | 2014-01-15T00:34:38Z | 2014-01-15T00:34:38Z | 2014-06-29T04:32:50Z |
ENH: Improve error message for PeriodIndex to infer_freq. Closes #5841. | diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index cfe874484231b..3892897e43bb0 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -358,7 +358,7 @@ def get_offset(name):
else:
if name in _rule_aliases:
name = _rule_aliases[name]
-
+
if name not in _offset_map:
try:
# generate and cache offset
@@ -625,7 +625,7 @@ def _period_str_to_code(freqstr):
alias = _period_alias_dict[freqstr]
except KeyError:
raise ValueError("Unknown freqstr: %s" % freqstr)
-
+
return _period_code_map[alias]
@@ -647,6 +647,10 @@ def infer_freq(index, warn=True):
from pandas.tseries.index import DatetimeIndex
if not isinstance(index, DatetimeIndex):
+ from pandas.tseries.period import PeriodIndex
+ if isinstance(index, PeriodIndex):
+ raise ValueError("PeriodIndex given. Check the `freq` attribute "
+ "instead of using infer_freq.")
index = DatetimeIndex(index)
inferer = _FrequencyInferer(index, warn=warn)
@@ -850,7 +854,7 @@ def _get_wom_rule(self):
weekdays = unique(self.index.weekday)
if len(weekdays) > 1:
return None
-
+
week_of_months = unique((self.index.day - 1) // 7)
if len(week_of_months) > 1:
return None
diff --git a/pandas/tseries/tests/test_frequencies.py b/pandas/tseries/tests/test_frequencies.py
index ad9c93592a26c..8d95e22e4c6f2 100644
--- a/pandas/tseries/tests/test_frequencies.py
+++ b/pandas/tseries/tests/test_frequencies.py
@@ -13,6 +13,7 @@
from pandas.tseries.tools import to_datetime
import pandas.tseries.frequencies as fmod
import pandas.tseries.offsets as offsets
+from pandas.tseries.period import PeriodIndex
import pandas.lib as lib
@@ -88,6 +89,10 @@ def test_anchored_shortcuts():
class TestFrequencyInference(tm.TestCase):
+ def test_raise_if_period_index(self):
+ index = PeriodIndex(start="1/1/1990", periods=20, freq="M")
+ self.assertRaises(ValueError, infer_freq, index)
+
def test_raise_if_too_few(self):
index = _dti(['12/31/1998', '1/3/1999'])
self.assertRaises(ValueError, infer_freq, index)
| closes #5841. I'm still not sure we shouldn't just return `freq` if a PeriodIndex is given. Seems easy enough.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5847 | 2014-01-04T15:34:02Z | 2014-01-04T18:48:48Z | 2014-01-04T18:48:48Z | 2014-07-16T08:45:16Z |
BLD: fix cythonized msgpack extension in setup.py GH5831 | diff --git a/setup.py b/setup.py
index 497c6a5644def..ce26946b76124 100755
--- a/setup.py
+++ b/setup.py
@@ -486,7 +486,8 @@ def pxd(name):
msgpack_ext = Extension('pandas.msgpack',
sources = [srcpath('msgpack',
- suffix=suffix, subdir='')],
+ suffix=suffix if suffix == '.pyx' else '.cpp',
+ subdir='')],
language='c++',
include_dirs=common_include,
define_macros=macros)
@@ -499,7 +500,7 @@ def pxd(name):
if suffix == '.pyx' and 'setuptools' in sys.modules:
# undo dumb setuptools bug clobbering .pyx sources back to .c
for ext in extensions:
- if ext.sources[0].endswith('.c'):
+ if ext.sources[0].endswith(('.c','.cpp')):
root, _ = os.path.splitext(ext.sources[0])
ext.sources[0] = root + suffix
| closes #5831
@yarikoptic, can you confirm this fixes the problem?
| https://api.github.com/repos/pandas-dev/pandas/pulls/5844 | 2014-01-04T03:28:50Z | 2014-01-10T12:26:16Z | 2014-01-10T12:26:16Z | 2014-06-24T04:26:01Z |
Add pandas cookbook to tutorials (for #5837) | diff --git a/doc/source/tutorials.rst b/doc/source/tutorials.rst
index fe12b9e9d855d..4ad9082b0cb9b 100644
--- a/doc/source/tutorials.rst
+++ b/doc/source/tutorials.rst
@@ -16,3 +16,43 @@ More complex recipes are in the :ref:`Cookbook<cookbook>`
Tutorials
---------
+
+Pandas Cookbook
+---------------
+
+The goal of this cookbook (by `Julia Evans <http://jvns.ca>`_) is to
+give you some concrete examples for getting started with pandas. These
+are examples with real-world data, and all the bugs and weirdness that
+that entails.
+
+Here are links to the v0.1 release. For an up-to-date table of contents, see the `pandas-cookbook GitHub
+repository <http://github.com/jvns/pandas-cookbook>`_.
+
+* | `A quick tour of the IPython
+ Notebook <http://nbviewer.ipython.org/github/jvns/pandas-c|%2055ookbook/blob/v0.1/cookbook/A%20quick%20tour%20of%20IPython%20Notebook.ipynb>`_
+ | Shows off IPython's awesome tab completion and magic functions.
+* | `Chapter 1: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%201%20-%20Reading%20from%20a%20CSV.ipynb>`_
+ Reading your data into pandas is pretty much the easiest thing. Even
+ when the encoding is wrong!
+* | `Chapter 2: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%202%20-%20Selecting%20data%20&%20finding%20the%20most%20common%20complaint%20type.ipynb>`_
+ It's not totally obvious how to select data from a pandas dataframe.
+ Here we explain the basics (how to take slices and get columns)
+* | `Chapter 3: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%203%20-%20Which%20borough%20has%20the%20most%20noise%20complaints%3F%20%28or%2C%20more%20selecting%20data%29.ipynb>`_
+ Here we get into serious slicing and dicing and learn how to filter
+ dataframes in complicated ways, really fast.
+* | `Chapter 4: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%204%20-%20Find%20out%20on%20which%20weekday%20people%20bike%20the%20most%20with%20groupby%20and%20aggregate.ipynb>`_
+ Groupby/aggregate is seriously my favorite thing about pandas
+ and I use it all the time. You should probably read this.
+* | `Chapter 5: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%205%20-%20Combining%20dataframes%20and%20scraping%20Canadian%20weather%20data.ipynb>`_
+ Here you get to find out if it's cold in Montreal in the winter
+ (spoiler: yes). Web scraping with pandas is fun! Here we combine dataframes.
+* | `Chapter 6: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%206%20-%20String%20operations%21%20Which%20month%20was%20the%20snowiest%3F.ipynb>`_
+ Strings with pandas are great. It has all these vectorized string
+ operations and they're the best. We will turn a bunch of strings
+ containing "Snow" into vectors of numbers in a trice.
+* | `Chapter 7: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%207%20-%20Cleaning%20up%20messy%20data.ipynb>`_
+ Cleaning up messy data is never a joy, but with pandas it's easier.
+* | `Chapter 8: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%208%20-%20How%20to%20deal%20with%20timestamps.ipynb>`_
+ Parsing Unix timestamps is confusing at first but it turns out
+ to be really easy.
+
| @jreback asked me to add something like this in https://github.com/jvns/pandas-cookbook/issues/1
I haven't been able to test that this displays correctly as I can't figure out how to build the Sphinx docs.
If someone can tell me how to build the docs, I can test it.
```
bork@kiwi ~/c/p/pandas> pwd
/home/bork/clones/pandas/pandas
bork@kiwi ~/c/p/pandas> python ../doc/make.py html
Error: Cannot find source directory.
Building HTML failed
```
```
bork@kiwi ~/c/p/doc> pwd
/home/bork/clones/pandas/doc
bork@kiwi ~/c/p/doc> python make.py html
Running Sphinx v1.1.3
cannot import name hashtable
Exception occurred while building, starting debugger:
Traceback (most recent call last):
File "/opt/anaconda/lib/python2.7/site-packages/sphinx/cmdline.py", line 188, in main
warningiserror, tags)
File "/opt/anaconda/lib/python2.7/site-packages/sphinx/application.py", line 102, in __init__
confoverrides or {}, self.tags)
File "/opt/anaconda/lib/python2.7/site-packages/sphinx/config.py", line 216, in __init__
exec code in config
File "/home/bork/clones/pandas/doc/source/conf.py", line 74, in <module>
import pandas
File "/home/bork/clones/pandas/pandas/__init__.py", line 6, in <module>
from . import hashtable, tslib, lib
ImportError: cannot import name hashtable
> /home/bork/clones/pandas/pandas/__init__.py(6)<module>()
-> from . import hashtable, tslib, lib
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/5842 | 2014-01-03T22:42:00Z | 2014-01-07T11:04:01Z | 2014-01-07T11:04:01Z | 2014-06-26T12:23:17Z |
DOC: minor fix in extract docstring | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 1b2c80f90f97b..528440f454e57 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1057,14 +1057,14 @@ You can check whether elements contain a pattern:
.. ipython:: python
pattern = r'[a-z][0-9]'
- Series(['1', '2', '3a', '3b', '03c']).contains(pattern)
+ Series(['1', '2', '3a', '3b', '03c']).str.contains(pattern)
or match a pattern:
.. ipython:: python
- Series(['1', '2', '3a', '3b', '03c']).match(pattern, as_indexer=True)
+ Series(['1', '2', '3a', '3b', '03c']).str.match(pattern, as_indexer=True)
The distinction between ``match`` and ``contains`` is strictness: ``match``
relies on strict ``re.match``, while ``contains`` relies on ``re.search``.
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 02f422bb0b635..1d9139fa9a1c7 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -333,15 +333,11 @@ def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=False):
Returns
-------
- boolean Series
+ Series of boolean values
if as_indexer=True
Series of tuples
if as_indexer=False, default but deprecated
- Returns
- -------
- Series of boolean values
-
See Also
--------
contains : analagous, but less strict, relying on re.search instead of
@@ -414,14 +410,27 @@ def str_extract(arr, pat, flags=0):
A pattern with more than one group will return a DataFrame.
>>> Series(['a1', 'b2', 'c3']).str.extract('([ab])(\d)')
+ 0 1
+ 0 a 1
+ 1 b 2
+ 2 NaN NaN
A pattern may contain optional groups.
>>> Series(['a1', 'b2', 'c3']).str.extract('([ab])?(\d)')
+ 0 1
+ 0 a 1
+ 1 b 2
+ 2 NaN 3
Named groups will become column names in the result.
>>> Series(['a1', 'b2', 'c3']).str.extract('(?P<letter>[ab])(?P<digit>\d)')
+ letter digit
+ 0 a 1
+ 1 b 2
+ 2 NaN NaN
+
"""
regex = re.compile(pat, flags=flags)
| https://api.github.com/repos/pandas-dev/pandas/pulls/5838 | 2014-01-03T18:39:14Z | 2014-01-04T09:55:17Z | 2014-01-04T09:55:17Z | 2014-06-20T01:21:42Z | |
DOC: change doc refs to 0.13.1 | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 38ba0b064c192..0c4de4e5173de 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1457,9 +1457,9 @@ It's also possible to reset multiple options at once (using a regex):
reset_option("^display")
-.. versionadded:: 0.14.0
+.. versionadded:: 0.13.1
- Beginning with v0.14.0 the `option_context` context manager has been exposed through
+ Beginning with v0.13.1 the `option_context` context manager has been exposed through
the top-level API, allowing you to execute code with given option values. Option values
are restored automatically when you exit the `with` block:
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 0b25d0f6aa61a..3d4b5e8facfa4 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -45,7 +45,7 @@ analysis / manipulation tool available in any language.
* Binary installers on PyPI: http://pypi.python.org/pypi/pandas
* Documentation: http://pandas.pydata.org
-pandas 0.14.0
+pandas 0.13.1
-------------
**Release date:** not-yet-released
@@ -56,7 +56,7 @@ New features
API Changes
~~~~~~~~~~~
-.. _release.bug_fixes-0.14.0:
+.. _release.bug_fixes-0.13.1:
Experimental Features
~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt
index cbe8a776e64ef..8d46c8c54c5c6 100644
--- a/doc/source/v0.13.0.txt
+++ b/doc/source/v0.13.0.txt
@@ -1,6 +1,6 @@
.. _whatsnew_0130:
-v0.13.0 (January 1, 2014)
+v0.13.0 (January 3, 2014)
---------------------------
This is a major release from 0.12.0 and includes a number of API changes, several new features and
diff --git a/doc/source/v0.14.0.txt b/doc/source/v0.13.1.txt
similarity index 74%
rename from doc/source/v0.14.0.txt
rename to doc/source/v0.13.1.txt
index b949a536043b4..dcb8564c89457 100644
--- a/doc/source/v0.14.0.txt
+++ b/doc/source/v0.13.1.txt
@@ -1,6 +1,6 @@
-.. _whatsnew_0140:
+.. _whatsnew_0131:
-v0.14.0 (???)
+v0.13.1 (???)
-------------
This is a major release from 0.13.0 and includes a number of API changes, several new features and
@@ -18,7 +18,7 @@ API changes
Prior Version Deprecations/Changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-These were announced changes in 0.13 or prior that are taking effect as of 0.14.0
+These were announced changes in 0.13 or prior that are taking effect as of 0.13.1
Deprecations
~~~~~~~~~~~~
@@ -32,9 +32,7 @@ Experimental
Bug Fixes
~~~~~~~~~
- - read_csv/read_table now respects the `prefix` kwarg (:issue:`5732`).
-
-See :ref:`V0.14.0 Bug Fixes<release.bug_fixes-0.14.0>` for an extensive list of bugs that have been fixed in 0.14.0.
+See :ref:`V0.13.1 Bug Fixes<release.bug_fixes-0.13.1>` for an extensive list of bugs that have been fixed in 0.13.1.
See the :ref:`full release notes
<release>` or issue tracker
| https://api.github.com/repos/pandas-dev/pandas/pulls/5833 | 2014-01-03T14:26:34Z | 2014-01-03T18:18:44Z | 2014-01-03T18:18:44Z | 2014-07-16T08:45:06Z | |
DOC: add 'pandas ecosystem' section to docs | diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
new file mode 100644
index 0000000000000..4c9e4ca4ef7ee
--- /dev/null
+++ b/doc/source/ecosystem.rst
@@ -0,0 +1,60 @@
+****************
+Pandas Ecosystem
+****************
+
+Increasingly, packages are being built on top of pandas to address specific needs
+in data preparation, analysis and visualization.
+This is encouraging because it means pandas is not only helping users to handle
+their data tasks but also that provides a better starting point for developers to
+build powerful and more focused data tools.
+The creation of libraries that complement pandas' functionality also allows pandas
+development to remain focused around it's original requirements.
+
+This is an in-exhaustive list of projects that build on pandas in order to provide
+tools in the PyData space.
+
+We'd like to make it easier for users to find these project, if you know of other
+substantial projects that you feel should be on this list, please let us know.
+
+`Statsmodels <http://statsmodels.sourceforge.net>`__
+-----------
+
+Statsmodels is the prominent python "statistics and econometrics library" and it has
+a long-standing special relationship with pandas. Statsmodels provides powerful statistics,
+econometrics, analysis and modeling functionality that is out of pandas' scope.
+Statsmodels leverages pandas objects as the underlying data container for computation.
+
+`Vincent <https://github.com/wrobstory/vincent>`__
+-------
+
+The `Vincent <https://github.com/wrobstory/vincent>`__ project leverages `Vega <https://github.com/trifacta/vega>`__ to create
+plots (that in turn, leverages `d3 <http://d3js.org/>`__). It has great support for pandas data objects.
+
+`yhat/ggplot <https://github.com/yhat/ggplot>`__
+-----------
+
+Hadley Wickham's `ggplot2 <http://ggplot2.org/>`__is a foundational exploratory visualization package for the R language.
+Based on `"The Grammer of Graphics" <http://www.cs.uic.edu/~wilkinson/TheGrammarOfGraphics/GOG.html>`__ it
+provides a powerful, declarative and extremely general way to generate plots of arbitrary data.
+It's really quite incredible. Various implementations to other languages are available,
+but a faithful implementation for python users has long been missing. Although still young
+(as of Jan-2014), the `yhat/ggplot <https://github.com/yhat/ggplot>` project has been
+progressing quickly in that direction.
+
+
+`Seaborn <https://github.com/mwaskom/seaborn>`__
+-------
+
+Although pandas has quite a bit of "just plot it" functionality built-in, visualization and
+in particular statistical graphics is a vast field with a long tradition and lots of ground
+to cover. `The Seaborn project <https://github.com/mwaskom/seaborn>`__ builds on top of pandas
+and `matplotlib <http://matplotlib.org>`__ to provide easy plotting of data which extends to
+more advanced types of plots then those offered by pandas.
+
+
+`Geopandas <https://github.com/kjordahl/geopandas>`__
+---------
+
+Geopandas extends pandas data objects to include geographic information which support
+geometric operations. If your work entails maps and geographical coordinates, and
+you love pandas, you should take a close look at Geopandas.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index c406c4f2cfa27..a416e4af4e486 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -130,7 +130,7 @@ See the package overview for more detail about what's in the library.
sparse
gotchas
r_interface
- related
+ ecosystem
comparison_with_r
comparison_with_sql
api
diff --git a/doc/source/related.rst b/doc/source/related.rst
deleted file mode 100644
index 33dad8115e5b1..0000000000000
--- a/doc/source/related.rst
+++ /dev/null
@@ -1,57 +0,0 @@
-************************
-Related Python libraries
-************************
-
-la (larry)
-----------
-
-Keith Goodman's excellent `labeled array package
-<http://pypi.python.org/pypi/la>`__ is very similar to pandas in many regards,
-though with some key differences. The main philosophical design difference is
-to be a wrapper around a single NumPy ``ndarray`` object while adding axis
-labeling and label-based operations and indexing. Because of this, creating a
-size-mutable object with heterogeneous columns (e.g. DataFrame) is not possible
-with the ``la`` package.
-
- - Provide a single n-dimensional object with labeled axes with functionally
- analogous data alignment semantics to pandas objects
- - Advanced / label-based indexing similar to that provided in pandas but
- setting is not supported
- - Stays much closer to NumPy arrays than pandas-- ``larry`` objects must be
- homogeneously typed
- - GroupBy support is relatively limited, but a few functions are available:
- ``group_mean``, ``group_median``, and ``group_ranking``
- - It has a collection of analytical functions suited to quantitative
- portfolio construction for financial applications
- - It has a collection of moving window statistics implemented in
- `Bottleneck <http://pypi.python.org/pypi/Bottleneck>`__
-
-statsmodels
------------
-
-The main `statistics and econometrics library
-<http://statsmodels.sourceforge.net>`__ for Python. pandas has become a
-dependency of this library.
-
-scikits.timeseries
-------------------
-
-`scikits.timeseries <http://pytseries.sourceforge.net/>`__ provides a data
-structure for fixed frequency time series data based on the numpy.MaskedArray
-class. For time series data, it provides some of the same functionality to the
-pandas Series class. It has many more functions for time series-specific
-manipulation. Also, it has support for many more frequencies, though less
-customizable by the user (so 5-minutely data is easier to do with pandas for
-example).
-
-We are aiming to merge these libraries together in the near future.
-
-Progress:
-
- - It has a collection of moving window statistics implemented in
- `Bottleneck <http://pandas.pydata.org/developers.html#development-roadmap>`__
- - `Outstanding issues <https://github.com/pydata/pandas/issues?labels=timeseries&milestone=&page=1&state=open>`__
-
-Summarising, Pandas offers superior functionality due to its combination with the :py:class:`pandas.DataFrame`.
-
-An introduction for former users of :mod:`scikits.timeseries` is provided in the :ref:`migration guide <ref-scikits-migration>`.
\ No newline at end of file
| cc @jseabold, @glamp, @mwaskom, @kjordahl, @wrobstory
Feel free to suggest a better blurb if you'd like.
Replaces the `related libraries` section, which I feel is outdated.
closes #5804
| https://api.github.com/repos/pandas-dev/pandas/pulls/5829 | 2014-01-03T02:57:56Z | 2014-01-03T02:58:01Z | 2014-01-03T02:58:01Z | 2014-07-14T12:38:07Z |
DOC: add pandas-xlsxwriter-charts ipnb to cookbook | diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 4c365455d1b03..b0b15410fb215 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -317,6 +317,10 @@ The :ref:`Plotting <visualization>` docs.
`Annotate a time-series plot #2
<http://stackoverflow.com/questions/17891493/annotating-points-from-a-pandas-dataframe-in-matplotlib-plot>`__
+`Generate Embedded plots in excel files using Pandas, Vincent and xlsxwriter
+<http://pandas-xlsxwriter-charts.readthedocs.org/en/latest/introduction.html>`__
+
+
Data In/Out
-----------
| cc @jmcnamara.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5828 | 2014-01-03T02:07:56Z | 2014-01-03T02:58:27Z | 2014-01-03T02:58:27Z | 2014-07-16T08:45:01Z |
DOC: Add example to extract docstring, and re-explain change to match. | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 38ba0b064c192..1b2c80f90f97b 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1029,7 +1029,7 @@ with more than one group returns a DataFrame with one column per group.
Series(['a1', 'b2', 'c3']).str.extract('([ab])(\d)')
-Elements that do not match return a row of ``NaN``s.
+Elements that do not match return a row filled with ``NaN``.
Thus, a Series of messy strings can be "converted" into a
like-indexed Series or DataFrame of cleaned-up or more useful strings,
without necessitating ``get()`` to access tuples or ``re.match`` objects.
@@ -1051,18 +1051,35 @@ can also be used.
Testing for Strings that Match or Contain a Pattern
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-In previous versions, *extracting* match groups was accomplished by ``match``,
-which returned a not-so-convenient Series of tuples. Starting in version 0.14,
-the default behavior of match will change. It will return a boolean
-indexer, analagous to the method ``contains``.
-The distinction between
-``match`` and ``contains`` is strictness: ``match`` relies on
-strict ``re.match`` while ``contains`` relies on ``re.search``.
+You can check whether elements contain a pattern:
-In version 0.13, ``match`` performs its old, deprecated behavior by default,
-but the new behavior is availabe through the keyword argument
-``as_indexer=True``.
+.. ipython:: python
+
+ pattern = r'[a-z][0-9]'
+ Series(['1', '2', '3a', '3b', '03c']).contains(pattern)
+
+or match a pattern:
+
+
+.. ipython:: python
+
+ Series(['1', '2', '3a', '3b', '03c']).match(pattern, as_indexer=True)
+
+The distinction between ``match`` and ``contains`` is strictness: ``match``
+relies on strict ``re.match``, while ``contains`` relies on ``re.search``.
+
+.. warning::
+
+ In previous versions, ``match`` was for *extracting* groups,
+ returning a not-so-convenient Series of tuples. The new method ``extract``
+ (described in the previous section) is now preferred.
+
+ This old, deprecated behavior of ``match`` is still the default. As
+ demonstrated above, use the new behavior by setting ``as_indexer=True``.
+ In this mode, ``match`` is analagous to ``contains``, returning a boolean
+ Series. The new behavior will become the default behavior in a future
+ release.
Methods like ``match``, ``contains``, ``startswith``, and ``endswith`` take
an extra ``na`` arguement so missing values can be considered True or False:
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 3b1b220d3fac7..02f422bb0b635 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -164,6 +164,11 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan):
Returns
-------
+ Series of boolean values
+
+ See Also
+ --------
+ match : analagous, but stricter, relying on re.match instead of re.search
"""
if not case:
@@ -326,11 +331,22 @@ def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=False):
as_indexer : False, by default, gives deprecated behavior better achieved
using str_extract. True return boolean indexer.
+ Returns
+ -------
+ boolean Series
+ if as_indexer=True
+ Series of tuples
+ if as_indexer=False, default but deprecated
Returns
-------
- matches : boolean array (if as_indexer=True)
- matches : array of tuples (if as_indexer=False, default but deprecated)
+ Series of boolean values
+
+ See Also
+ --------
+ contains : analagous, but less strict, relying on re.search instead of
+ re.match
+ extract : now preferred to the deprecated usage of match (as_indexer=False)
Notes
-----
@@ -385,10 +401,27 @@ def str_extract(arr, pat, flags=0):
-------
extracted groups : Series (one group) or DataFrame (multiple groups)
+ Examples
+ --------
+ A pattern with one group will return a Series. Non-matches will be NaN.
- Notes
- -----
- Compare to the string method match, which returns re.match objects.
+ >>> Series(['a1', 'b2', 'c3']).str.extract('[ab](\d)')
+ 0 1
+ 1 2
+ 2 NaN
+ dtype: object
+
+ A pattern with more than one group will return a DataFrame.
+
+ >>> Series(['a1', 'b2', 'c3']).str.extract('([ab])(\d)')
+
+ A pattern may contain optional groups.
+
+ >>> Series(['a1', 'b2', 'c3']).str.extract('([ab])?(\d)')
+
+ Named groups will become column names in the result.
+
+ >>> Series(['a1', 'b2', 'c3']).str.extract('(?P<letter>[ab])(?P<digit>\d)')
"""
regex = re.compile(pat, flags=flags)
| @jreback, I added examples per your request in #5099. Also, I rewrote the explanation of how `str.match` is changing, which I left confusing and wordy before.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5826 | 2014-01-02T21:57:20Z | 2014-01-03T15:24:28Z | 2014-01-03T15:24:28Z | 2014-07-16T08:44:59Z |
BUG: dropna dtype comp issue related (GH5815) | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 36cfbb524ab31..5544cd0b34e3c 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3954,7 +3954,7 @@ def count(self, axis=0, level=None, numeric_only=False):
else:
result = notnull(frame).sum(axis=axis)
- return result
+ return result.astype('int64')
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
| fixes dtype issue on 32-bit, related #5815
| https://api.github.com/repos/pandas-dev/pandas/pulls/5820 | 2014-01-02T11:54:16Z | 2014-01-02T12:09:55Z | 2014-01-02T12:09:55Z | 2014-06-19T08:09:48Z |
BUG: Fix DatetimeIndex.insert() with strings. | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 0b25d0f6aa61a..95f7f7e71b89d 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -73,6 +73,7 @@ Bug Fixes
~~~~~~~~~
- Bug in Series replace with timestamp dict (:issue:`5797`)
- read_csv/read_table now respects the `prefix` kwarg (:issue:`5732`).
+ - Bug with insert of strings into DatetimeIndex (:issue:`5818`, :issue:`5819`)
pandas 0.13.0
-------------
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index bfddd2e78c322..9a9e3caa96c5b 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1455,7 +1455,6 @@ def _safe_append_to_index(index, key):
# raise here as this is basically an unsafe operation and we want
# it to be obvious that you are doing something wrong
-
raise ValueError("unsafe appending to index of type {0} with a key "
"{1}".format(index.__class__.__name__, key))
diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py
index bd05a7093fd7c..70f1e50f475bb 100644
--- a/pandas/sparse/tests/test_sparse.py
+++ b/pandas/sparse/tests/test_sparse.py
@@ -1084,8 +1084,10 @@ def test_icol(self):
def test_set_value(self):
- # this is invalid because it is not a valid type for this index
- self.assertRaises(ValueError, self.frame.set_value, 'foobar', 'B', 1.5)
+ # ok as the index gets conver to object
+ frame = self.frame.copy()
+ res = frame.set_value('foobar', 'B', 1.5)
+ self.assert_(res.index.dtype == 'object')
res = self.frame
res.index = res.index.astype(object)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index a1ef94d8400da..548f49e23a035 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -10897,6 +10897,19 @@ def test_reset_index_multiindex_col(self):
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
+ def test_reset_index_with_datetimeindex_cols(self):
+ # GH5818
+ #
+ df = pd.DataFrame([[1, 2], [3, 4]],
+ columns=pd.date_range('1/1/2013', '1/2/2013'),
+ index=['A', 'B'])
+
+ result = df.reset_index()
+ expected = pd.DataFrame([['A', 1, 2], ['B', 3, 4]],
+ columns=['index', datetime(2013, 1, 1),
+ datetime(2013, 1, 2)])
+ assert_frame_equal(result, expected)
+
#----------------------------------------------------------------------
# Tests to cope with refactored internals
def test_as_matrix_numeric_cols(self):
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index fe3aac0e9eeaa..ee57902bdeb5f 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -1665,15 +1665,13 @@ def test_partial_set_invalid(self):
df = tm.makeTimeDataFrame()
+ # don't allow not string inserts
def f():
df.loc[100.0, :] = df.ix[0]
self.assertRaises(ValueError, f)
def f():
df.loc[100,:] = df.ix[0]
self.assertRaises(ValueError, f)
- def f():
- df.loc['a',:] = df.ix[0]
- self.assertRaises(ValueError, f)
def f():
df.ix[100.0, :] = df.ix[0]
@@ -1682,6 +1680,9 @@ def f():
df.ix[100,:] = df.ix[0]
self.assertRaises(ValueError, f)
+ # allow object conversion here
+ df.loc['a',:] = df.ix[0]
+
def test_partial_set_empty(self):
# GH5226
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 6779e1a61c081..8cf11dd921abf 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -1533,6 +1533,8 @@ def insert(self, loc, item):
----------
loc : int
item : object
+ if not either a Python datetime or a numpy integer-like, returned
+ Index dtype will be object rather than datetime.
Returns
-------
@@ -1540,11 +1542,17 @@ def insert(self, loc, item):
"""
if isinstance(item, datetime):
item = _to_m8(item, tz=self.tz)
-
- new_index = np.concatenate((self[:loc].asi8,
+ try:
+ new_index = np.concatenate((self[:loc].asi8,
[item.view(np.int64)],
self[loc:].asi8))
- return DatetimeIndex(new_index, freq='infer')
+ return DatetimeIndex(new_index, freq='infer')
+ except (AttributeError, TypeError):
+
+ # fall back to object index
+ if isinstance(item,compat.string_types):
+ return self.asobject.insert(loc, item)
+ raise TypeError("cannot insert DatetimeIndex with incompatible label")
def delete(self, loc):
"""
@@ -1585,7 +1593,7 @@ def tz_convert(self, tz):
def tz_localize(self, tz, infer_dst=False):
"""
Localize tz-naive DatetimeIndex to given time zone (using pytz)
-
+
Parameters
----------
tz : string or pytz.timezone
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index f4dcdb7a44a3e..4dfe05e38458a 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -2099,6 +2099,13 @@ def test_insert(self):
'2000-01-02'])
self.assert_(result.equals(exp))
+ # insertion of non-datetime should coerce to object index
+ result = idx.insert(1, 'inserted')
+ expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
+ datetime(2000, 1, 2)])
+ self.assert_(not isinstance(result, DatetimeIndex))
+ tm.assert_index_equal(result, expected)
+
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assert_(result.freqstr == 'M')
| Falls back to object Index instead. (previously wasn't checking for them), but _only_ strings are allowed.
Fixes #5818.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5819 | 2014-01-02T07:14:00Z | 2014-01-24T22:20:52Z | 2014-01-24T22:20:51Z | 2014-07-09T11:59:16Z |
CLN: Make io/data urls easier to monkey-patch | diff --git a/pandas/io/data.py b/pandas/io/data.py
index 98ac860c391c8..b3332df3c8866 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -107,6 +107,9 @@ def _in_chunks(seq, size):
'time': 't1', 'short_ratio': 's7'}
+_YAHOO_QUOTE_URL = 'http://finance.yahoo.com/d/quotes.csv?'
+
+
def get_quote_yahoo(symbols):
"""
Get current yahoo quote
@@ -124,8 +127,7 @@ def get_quote_yahoo(symbols):
data = defaultdict(list)
- url_str = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (sym_list,
- request)
+ url_str = _YAHOO_QUOTE_URL + 's=%s&f=%s' % (sym_list, request)
with urlopen(url_str) as url:
lines = url.readlines()
@@ -175,6 +177,9 @@ def _retry_read_url(url, retry_count, pause, name):
"return a 200 for url %r" % (retry_count, name, url))
+_HISTORICAL_YAHOO_URL = 'http://ichart.finance.yahoo.com/table.csv?'
+
+
def _get_hist_yahoo(sym, start, end, retry_count, pause):
"""
Get historical data for the given name from yahoo.
@@ -183,8 +188,7 @@ def _get_hist_yahoo(sym, start, end, retry_count, pause):
Returns a DataFrame.
"""
start, end = _sanitize_dates(start, end)
- yahoo_url = 'http://ichart.finance.yahoo.com/table.csv?'
- url = (yahoo_url + 's=%s' % sym +
+ url = (_HISTORICAL_YAHOO_URL + 's=%s' % sym +
'&a=%s' % (start.month - 1) +
'&b=%s' % start.day +
'&c=%s' % start.year +
@@ -196,6 +200,9 @@ def _get_hist_yahoo(sym, start, end, retry_count, pause):
return _retry_read_url(url, retry_count, pause, 'Yahoo!')
+_HISTORICAL_GOOGLE_URL = 'http://www.google.com/finance/historical?'
+
+
def _get_hist_google(sym, start, end, retry_count, pause):
"""
Get historical data for the given name from google.
@@ -204,13 +211,13 @@ def _get_hist_google(sym, start, end, retry_count, pause):
Returns a DataFrame.
"""
start, end = _sanitize_dates(start, end)
- google_URL = 'http://www.google.com/finance/historical?'
# www.google.com/finance/historical?q=GOOG&startdate=Jun+9%2C+2011&enddate=Jun+8%2C+2013&output=csv
- url = google_URL + urlencode({"q": sym,
- "startdate": start.strftime('%b %d, ' '%Y'),
- "enddate": end.strftime('%b %d, %Y'),
- "output": "csv"})
+ url = "%s%s" % (_HISTORICAL_GOOGLE_URL,
+ urlencode({"q": sym,
+ "startdate": start.strftime('%b %d, ' '%Y'),
+ "enddate": end.strftime('%b %d, %Y'),
+ "output": "csv"}))
return _retry_read_url(url, retry_count, pause, 'Google')
@@ -251,6 +258,9 @@ def _calc_return_index(price_df):
return df
+_YAHOO_COMPONENTS_URL = 'http://download.finance.yahoo.com/d/quotes.csv?'
+
+
def get_components_yahoo(idx_sym):
"""
Returns DataFrame containing list of component information for
@@ -275,8 +285,7 @@ def get_components_yahoo(idx_sym):
stats = 'snx'
# URL of form:
# http://download.finance.yahoo.com/d/quotes.csv?s=@%5EIXIC&f=snxl1d1t1c1ohgv
- url = ('http://download.finance.yahoo.com/d/quotes.csv?s={0}&f={1}'
- '&e=.csv&h={2}')
+ url = _YAHOO_COMPONENTS_URL + 's={0}&f={1}&e=.csv&h={2}'
idx_mod = idx_sym.replace('^', '@%5E')
url_str = url.format(idx_mod, stats, 1)
@@ -430,6 +439,9 @@ def get_data_google(symbols=None, start=None, end=None, retry_count=3,
adjust_price, ret_index, chunksize, 'google', name)
+_FRED_URL = "http://research.stlouisfed.org/fred2/series/"
+
+
def get_data_fred(name, start=dt.datetime(2010, 1, 1),
end=dt.datetime.today()):
"""
@@ -443,14 +455,12 @@ def get_data_fred(name, start=dt.datetime(2010, 1, 1),
"""
start, end = _sanitize_dates(start, end)
- fred_URL = "http://research.stlouisfed.org/fred2/series/"
-
if not is_list_like(name):
names = [name]
else:
names = name
- urls = [fred_URL + '%s' % n + '/downloaddata/%s' % n + '.csv' for
+ urls = [_FRED_URL + '%s' % n + '/downloaddata/%s' % n + '.csv' for
n in names]
def fetch_data(url, name):
@@ -470,11 +480,12 @@ def fetch_data(url, name):
return df
+_FAMAFRENCH_URL = 'http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp'
+
+
def get_data_famafrench(name):
# path of zip files
- zip_file_url = ('http://mba.tuck.dartmouth.edu/pages/faculty/'
- 'ken.french/ftp')
- zip_file_path = '{0}/{1}.zip'.format(zip_file_url, name)
+ zip_file_path = '{0}/{1}.zip'.format(_FAMAFRENCH_URL, name)
with urlopen(zip_file_path) as url:
raw = url.read()
@@ -618,10 +629,12 @@ def get_options_data(self, month=None, year=None, expiry=None):
return [f(month, year, expiry) for f in (self.get_put_data,
self.get_call_data)]
+ _OPTIONS_BASE_URL = 'http://finance.yahoo.com/q/op?s={sym}'
+
def _get_option_data(self, month, year, expiry, table_loc, name):
year, month = self._try_parse_dates(year, month, expiry)
- url = 'http://finance.yahoo.com/q/op?s={sym}'.format(sym=self.symbol)
+ url = self._OPTIONS_BASE_URL.format(sym=self.symbol)
if month and year: # try to get specified month from yahoo finance
m1, m2 = _two_char_month(month), month
| This could be useful to make pandas more resilient to basic url changes
like that which happened with yahoo finance. That said, clearly
wholesale API changes won't be helped by this.
What do you all think, worth it to make this relatively trivial change?
| https://api.github.com/repos/pandas-dev/pandas/pulls/5817 | 2014-01-02T00:46:13Z | 2014-01-03T00:06:25Z | 2014-01-03T00:06:25Z | 2014-07-16T08:44:56Z |
PERF: perf issue with dropna on frame | diff --git a/doc/source/release.rst b/doc/source/release.rst
index c0e155372760f..0074d3b359cbe 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -67,6 +67,7 @@ Improvements to existing features
- perf improvements in Series datetime/timedelta binary operations (:issue:`5801`)
- `option_context` context manager now available as top-level API (:issue:`5752`)
- df.info() view now display dtype info per column (:issue: `5682`)
+ - perf improvements in DataFrame ``count/dropna`` for ``axis=1``
Bug Fixes
~~~~~~~~~
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 97c284fb75a43..36cfbb524ab31 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3952,7 +3952,7 @@ def count(self, axis=0, level=None, numeric_only=False):
counts = notnull(frame.values).sum(1)
result = Series(counts, index=frame._get_agg_axis(axis))
else:
- result = DataFrame.apply(frame, Series.count, axis=axis)
+ result = notnull(frame).sum(axis=axis)
return result
diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py
index ee4d876d20233..fd03d512125e7 100644
--- a/vb_suite/frame_methods.py
+++ b/vb_suite/frame_methods.py
@@ -289,6 +289,33 @@ def f(K=100):
frame_isnull = Benchmark('isnull(df)', setup,
start_date=datetime(2012,1,1))
+## dropna
+setup = common_setup + """
+data = np.random.randn(10000, 1000)
+df = DataFrame(data)
+df.ix[50:1000,20:50] = np.nan
+df.ix[2000:3000] = np.nan
+df.ix[:,60:70] = np.nan
+"""
+frame_dropna_axis0_any = Benchmark('df.dropna(how="any",axis=0)', setup,
+ start_date=datetime(2012,1,1))
+frame_dropna_axis0_all = Benchmark('df.dropna(how="all",axis=0)', setup,
+ start_date=datetime(2012,1,1))
+
+setup = common_setup + """
+data = np.random.randn(10000, 1000)
+df = DataFrame(data)
+df.ix[50:1000,20:50] = np.nan
+df.ix[2000:3000] = np.nan
+df.ix[:,60:70] = np.nan
+"""
+frame_dropna_axis1_any = Benchmark('df.dropna(how="any",axis=1)', setup,
+ start_date=datetime(2012,1,1))
+
+frame_dropna_axis1_all = Benchmark('df.dropna(how="all",axis=1)', setup,
+ start_date=datetime(2012,1,1))
+
+
#----------------------------------------------------------------------
# apply
@@ -298,3 +325,4 @@ def f(K=100):
"""
frame_apply_user_func = Benchmark('df.apply(lambda x: np.corrcoef(x,s)[0,1])', setup,
start_date=datetime(2012,1,1))
+
| took out the apply on `count` and just compute directly
```
-------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
frame_dropna_axis1_any | 147.5154 | 334.2137 | 0.4414 |
frame_dropna_axis1_all | 251.1443 | 437.9021 | 0.5735 |
frame_dropna_axis0_all | 80.6900 | 80.8613 | 0.9979 |
frame_dropna_axis0_any | 58.6040 | 54.6887 | 1.0716 |
-------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
Ratio < 1.0 means the target commit is faster then the baseline.
Seed used: 1234
Target [c6e300d] : PERF: perf issue with dropna on frame
Base [5e176a9] : Merge pull request #5738 from y-p/PR_json_pr_ver
BLD: ci/print_versions.py learned to output json
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/5815 | 2014-01-01T18:50:03Z | 2014-01-01T19:13:26Z | 2014-01-01T19:13:26Z | 2014-07-16T08:44:54Z |
DOC: add way to document DatetimeIndex field attributes | diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 23b949c1fedfb..6779e1a61c081 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -36,7 +36,7 @@ def _utc():
# -------- some conversion wrapper functions
-def _field_accessor(name, field):
+def _field_accessor(name, field, docstring=None):
def f(self):
values = self.asi8
if self.tz is not None:
@@ -45,6 +45,7 @@ def f(self):
values = self._local_timestamps()
return tslib.get_date_field(values, field)
f.__name__ = name
+ f.__doc__ = docstring
return property(f)
@@ -1398,7 +1399,7 @@ def freqstr(self):
return self.offset.freqstr
year = _field_accessor('year', 'Y')
- month = _field_accessor('month', 'M')
+ month = _field_accessor('month', 'M', "The month as January=1, December=12")
day = _field_accessor('day', 'D')
hour = _field_accessor('hour', 'h')
minute = _field_accessor('minute', 'm')
@@ -1407,7 +1408,8 @@ def freqstr(self):
nanosecond = _field_accessor('nanosecond', 'ns')
weekofyear = _field_accessor('weekofyear', 'woy')
week = weekofyear
- dayofweek = _field_accessor('dayofweek', 'dow')
+ dayofweek = _field_accessor('dayofweek', 'dow',
+ "The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
dayofyear = _field_accessor('dayofyear', 'doy')
quarter = _field_accessor('quarter', 'q')
| Ping @rockg. The docstring is now listed in the autosummary table in api.rst.
Related to issue #5813 (but does not closes it, this only documents the DatetimeIndex field in the api.rst, not more general the Timestamp values).
I added the attribute docstring within the `_field_accessor` function. Is this a good approach?
You can also document attributes with a docstring line beneath the definition (see Sphinx docs: http://sphinx-doc.org/ext/autodoc.html#directive-autoattribute). The problem with this is that, for the moment, this works for the sphinx autodoc (so the generated pages), but not for the autosummary (there is an open PR for this: https://bitbucket.org/birkenfeld/sphinx/pull-request/142/make-autosummary-work-with-module-class/diff).
| https://api.github.com/repos/pandas-dev/pandas/pulls/5814 | 2014-01-01T17:11:07Z | 2014-01-02T20:17:17Z | 2014-01-02T20:17:17Z | 2014-06-29T12:30:55Z |
BUG: Yahoo finance changed chart base url. Updated _get_hist_yahoo | diff --git a/pandas/io/data.py b/pandas/io/data.py
index a3968446930e8..98ac860c391c8 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -183,7 +183,7 @@ def _get_hist_yahoo(sym, start, end, retry_count, pause):
Returns a DataFrame.
"""
start, end = _sanitize_dates(start, end)
- yahoo_url = 'http://ichart.yahoo.com/table.csv?'
+ yahoo_url = 'http://ichart.finance.yahoo.com/table.csv?'
url = (yahoo_url + 's=%s' % sym +
'&a=%s' % (start.month - 1) +
'&b=%s' % start.day +
| The start of the old url was: `http://ichart.yahoo.com/` and yahoo now uses `http://ichart.finance.yahoo.com/`
| https://api.github.com/repos/pandas-dev/pandas/pulls/5812 | 2013-12-31T21:50:44Z | 2014-01-01T18:52:09Z | 2014-01-01T18:52:09Z | 2014-06-12T19:28:36Z |
BUG: fix issue of boolean comparison on empty DataFrames (GH5808) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 0150b233110a7..425f6dfe36990 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -74,6 +74,7 @@ Bug Fixes
- Bug in Series replace with timestamp dict (:issue:`5797`)
- read_csv/read_table now respects the `prefix` kwarg (:issue:`5732`).
- Bug in selection with missing values via ``.ix`` from a duplicate indexed DataFrame failing (:issue:`5835`)
+ - Fix issue of boolean comparison on empty DataFrames (:issue:`5808`)
pandas 0.13.0
-------------
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index b8f988e38f14b..a0e274b952817 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -736,11 +736,16 @@ def na_op(x, y):
result = np.empty(x.size, dtype=dtype)
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
- result[mask] = op(xrav[mask], yrav[mask])
+ xrav = xrav[mask]
+ yrav = yrav[mask]
+ if np.prod(xrav.shape) and np.prod(yrav.shape):
+ result[mask] = op(xrav, yrav)
else:
result = np.empty(x.size, dtype=x.dtype)
mask = notnull(xrav)
- result[mask] = op(xrav[mask], y)
+ xrav = xrav[mask]
+ if np.prod(xrav.shape):
+ result[mask] = op(xrav, y)
result, changed = com._maybe_upcast_putmask(result, -mask, np.nan)
result = result.reshape(x.shape)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index a1ef94d8400da..ef6990337bbbb 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4392,6 +4392,41 @@ def test_operators(self):
df = DataFrame({'a': ['a', None, 'b']})
assert_frame_equal(df + df, DataFrame({'a': ['aa', np.nan, 'bb']}))
+ def test_operators_boolean(self):
+
+ # GH 5808
+ # empty frames, non-mixed dtype
+
+ result = DataFrame(index=[1]) & DataFrame(index=[1])
+ assert_frame_equal(result,DataFrame(index=[1]))
+
+ result = DataFrame(index=[1]) | DataFrame(index=[1])
+ assert_frame_equal(result,DataFrame(index=[1]))
+
+ result = DataFrame(index=[1]) & DataFrame(index=[1,2])
+ assert_frame_equal(result,DataFrame(index=[1,2]))
+
+ result = DataFrame(index=[1],columns=['A']) & DataFrame(index=[1],columns=['A'])
+ assert_frame_equal(result,DataFrame(index=[1],columns=['A']))
+
+ result = DataFrame(True,index=[1],columns=['A']) & DataFrame(True,index=[1],columns=['A'])
+ assert_frame_equal(result,DataFrame(True,index=[1],columns=['A']))
+
+ result = DataFrame(True,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
+ assert_frame_equal(result,DataFrame(True,index=[1],columns=['A']))
+
+ # boolean ops
+ result = DataFrame(1,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
+ assert_frame_equal(result,DataFrame(1,index=[1],columns=['A']))
+
+ def f():
+ DataFrame(1.0,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
+ self.assertRaises(TypeError, f)
+
+ def f():
+ DataFrame('foo',index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
+ self.assertRaises(TypeError, f)
+
def test_operators_none_as_na(self):
df = DataFrame({"col1": [2, 5.0, 123, None],
"col2": [1, 2, 3, 4]}, dtype=object)
| closes #5808
| https://api.github.com/repos/pandas-dev/pandas/pulls/5810 | 2013-12-31T15:46:31Z | 2014-01-04T22:30:39Z | 2014-01-04T22:30:39Z | 2014-06-18T06:21:16Z |
BUG: Series replace values using timestamps in a dict GH5797 | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 0074d3b359cbe..1b32f385d3ab9 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -71,7 +71,7 @@ Improvements to existing features
Bug Fixes
~~~~~~~~~
-
+ - Bug in Series replace with timestamp dict (:issue:`5797`)
pandas 0.13.0
-------------
@@ -861,7 +861,7 @@ Bug Fixes
- Bug in fillna with Series and a passed series/dict (:issue:`5703`)
- Bug in groupby transform with a datetime-like grouper (:issue:`5712`)
- Bug in multi-index selection in PY3 when using certain keys (:issue:`5725`)
- - Row-wise concat of differeing dtypes failing in certain cases (:issue:`5754`)
+ - Row-wise concat of differing dtypes failing in certain cases (:issue:`5754`)
pandas 0.12.0
-------------
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index e76cf69eb420b..d636edeec0815 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -2322,7 +2322,7 @@ def replace_list(self, src_lst, dest_lst, inplace=False, regex=False):
def comp(s):
if isnull(s):
return isnull(values)
- return values == s
+ return values == getattr(s, 'asm8', s)
masks = [comp(s) for i, s in enumerate(src_lst)]
result_blocks = []
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 71ed0283fc3d0..e8b421608fc0a 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -5189,6 +5189,16 @@ def test_replace(self):
expected = ser.ffill()
result = ser.replace(np.nan)
assert_series_equal(result, expected)
+ #GH 5797
+ ser = Series(date_range('20130101', periods=5))
+ expected = ser.copy()
+ expected.loc[2] = Timestamp('20120101')
+ result = ser.replace({Timestamp('20130103'):
+ Timestamp('20120101')})
+ assert_series_equal(result, expected)
+ result = ser.replace(Timestamp('20130103'), Timestamp('20120101'))
+ assert_series_equal(result, expected)
+
def test_replace_with_single_list(self):
ser = Series([0, 1, 2, 3, 4])
| This fixes issue #5797.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5806 | 2013-12-31T14:29:00Z | 2014-01-02T22:58:45Z | 2014-01-02T22:58:45Z | 2014-06-16T14:08:41Z |
BLD: version strings should be updated only when tagging new release | diff --git a/setup.py b/setup.py
index 608532e919627..497c6a5644def 100755
--- a/setup.py
+++ b/setup.py
@@ -189,7 +189,7 @@ def build_extensions(self):
]
MAJOR = 0
-MINOR = 14
+MINOR = 13
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
| 0.13.0 was the first time I tagged a release and I got it wrong.
Version strings are bumped only when tagging a new release.
```
% grh v0.13.0rc1^
% sudo python ./setup.py develop
% cat pandas/version.py
version = '0.12.0-1189-gd9b3340'
short_version = '0.12.0'
```
It makes sense since it leaves the next version undetermined until it happens.
Keeping the status-quo for now, until we agree on something better.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5803 | 2013-12-31T00:37:30Z | 2013-12-31T00:37:43Z | 2013-12-31T00:37:43Z | 2014-06-15T18:45:46Z |
PERF: fix infer_dtype to properly infer a Series (GH5801) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 78c92ec11609e..7109b87f5352b 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -64,6 +64,8 @@ Experimental Features
Improvements to existing features
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ - perf improvements in Series datetime/timedelta binary operations (:issue:`5801`)
+
Bug Fixes
~~~~~~~~~
diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx
index dce46c972fb3b..84f1f3cb4904d 100644
--- a/pandas/src/inference.pyx
+++ b/pandas/src/inference.pyx
@@ -35,6 +35,8 @@ def infer_dtype(object _values):
if isinstance(_values, np.ndarray):
values = _values
+ elif hasattr(_values,'values'):
+ values = _values.values
else:
if not isinstance(_values, list):
_values = list(_values)
diff --git a/vb_suite/binary_ops.py b/vb_suite/binary_ops.py
index fc84dd8bcdb81..5ec2d9fcfc2cf 100644
--- a/vb_suite/binary_ops.py
+++ b/vb_suite/binary_ops.py
@@ -103,6 +103,9 @@
Benchmark("df[(df>0) & (df2>0)]", setup, name='frame_multi_and_no_ne',cleanup="expr.set_use_numexpr(True)",
start_date=datetime(2013, 2, 26))
+#----------------------------------------------------------------------
+# timeseries
+
setup = common_setup + """
N = 1000000
halfway = N // 2 - 1
@@ -114,3 +117,13 @@
start_date=datetime(2013, 9, 27))
series_timestamp_compare = Benchmark("s <= ts", setup,
start_date=datetime(2012, 2, 21))
+
+setup = common_setup + """
+N = 1000000
+s = Series(date_range('20010101', periods=N, freq='s'))
+"""
+
+timestamp_ops_diff1 = Benchmark("s.diff()", setup,
+ start_date=datetime(2013, 1, 1))
+timestamp_ops_diff2 = Benchmark("s-s.shift()", setup,
+ start_date=datetime(2013, 1, 1))
| closes #5801
```
------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
timestamp_ops_diff2 | 21.7124 | 2472.3583 | 0.0088 |
-------------------------------------------------------------------------------
Test name | head[ms] | base[ms] | ratio |
-------------------------------------------------------------------------------
Ratio < 1.0 means the target commit is faster then the baseline.
Seed used: 1234
```
really though this was in their before....oh well
| https://api.github.com/repos/pandas-dev/pandas/pulls/5802 | 2013-12-30T22:20:36Z | 2013-12-30T22:54:10Z | 2013-12-30T22:54:10Z | 2014-06-15T19:45:24Z |
COMPAT: back compat for HDFStore with a Term | diff --git a/pandas/computation/pytables.py b/pandas/computation/pytables.py
index 7716bc0051159..bf477cd71df62 100644
--- a/pandas/computation/pytables.py
+++ b/pandas/computation/pytables.py
@@ -4,8 +4,8 @@
import time
import warnings
from functools import partial
-from datetime import datetime
-
+from datetime import datetime, timedelta
+import numpy as np
import pandas as pd
from pandas.compat import u, string_types, PY3
from pandas.core.base import StringMixin
@@ -540,6 +540,18 @@ def parse_back_compat(self, w, op=None, value=None):
if value is not None:
if isinstance(value, Expr):
raise TypeError("invalid value passed, must be a string")
+
+ # stringify with quotes these values
+ def convert(v):
+ if isinstance(v, (datetime,np.datetime64,timedelta,np.timedelta64)) or hasattr(v, 'timetuple'):
+ return "'{0}'".format(v)
+ return v
+
+ if isinstance(value, (list,tuple)):
+ value = [ convert(v) for v in value ]
+ else:
+ value = convert(value)
+
w = "{0}{1}".format(w, value)
warnings.warn("passing multiple values to Expr is deprecated, "
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index c9955b1ae2fb2..5fcafdc295c5c 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -2347,6 +2347,30 @@ def test_term_compat(self):
expected = wp.loc[:,wp.major_axis<=Timestamp('20000103'),:]
assert_panel_equal(result, expected)
+ with ensure_clean_store(self.path) as store:
+
+ wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
+ major_axis=date_range('1/1/2000', periods=5),
+ minor_axis=['A', 'B', 'C', 'D'])
+ store.append('wp',wp)
+
+ # stringified datetimes
+ result = store.select('wp', [Term('major_axis','>',datetime.datetime(2000,1,2))])
+ expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
+ assert_panel_equal(result, expected)
+
+ result = store.select('wp', [Term('major_axis','>',datetime.datetime(2000,1,2,0,0))])
+ expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
+ assert_panel_equal(result, expected)
+
+ result = store.select('wp', [Term('major_axis','=',[datetime.datetime(2000,1,2,0,0),datetime.datetime(2000,1,3,0,0)])])
+ expected = wp.loc[:,[Timestamp('20000102'),Timestamp('20000103')]]
+ assert_panel_equal(result, expected)
+
+ result = store.select('wp', [Term('minor_axis','=',['A','B'])])
+ expected = wp.loc[:,:,['A','B']]
+ assert_panel_equal(result, expected)
+
def test_same_name_scoping(self):
with ensure_clean_store(self.path) as store:
| https://api.github.com/repos/pandas-dev/pandas/pulls/5794 | 2013-12-30T14:55:50Z | 2013-12-30T14:55:55Z | 2013-12-30T14:55:55Z | 2014-06-26T12:05:55Z | |
DOC: fix minor doc build warnings | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 9aa6c68e9de8c..fa967a8e237d1 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -248,7 +248,7 @@ API Changes
- allow ``ix/loc`` for Series/DataFrame/Panel to set on any axis even when
the single-key is not currently contained in the index for that axis
(:issue:`2578`, :issue:`5226`, :issue:`5632`, :issue:`5720`,
- :issue:`5744`, :issue:`5756`)
+ :issue:`5744`, :issue:`5756`)
- Default export for ``to_clipboard`` is now csv with a sep of `\t` for
compat (:issue:`3368`)
- ``at`` now will enlarge the object inplace (and return the same)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index e07655b0539a5..61235862534f0 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1852,7 +1852,7 @@ def fillna(self, value=None, method=None, axis=0, inplace=False,
limit : int, default None
Maximum size gap to forward or backward fill
downcast : dict, default is None
- a dict of item->dtype of what to downcast if possible,
+ a dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible)
| Two minor space-errors which caused a doc build warning.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5792 | 2013-12-30T09:44:34Z | 2013-12-30T11:27:19Z | 2013-12-30T11:27:19Z | 2014-07-16T08:44:39Z |
TST: aggregate_item_by_item test failure (GH5782) | diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 9d7e90e5f8f32..34c8869f72a53 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -583,8 +583,11 @@ def test_aggregate_item_by_item(self):
foo = (self.df.A == 'foo').sum()
bar = (self.df.A == 'bar').sum()
K = len(result.columns)
- assert_almost_equal(result.xs('foo'), [foo] * K)
- assert_almost_equal(result.xs('bar'), [bar] * K)
+
+ # GH5782
+ # odd comparisons can result here, so cast to make easy
+ assert_almost_equal(result.xs('foo'), np.array([foo] * K).astype('float64'))
+ assert_almost_equal(result.xs('bar'), np.array([bar] * K).astype('float64'))
def aggfun(ser):
return ser.size
| closes #5782
| https://api.github.com/repos/pandas-dev/pandas/pulls/5791 | 2013-12-29T23:34:43Z | 2013-12-29T23:35:06Z | 2013-12-29T23:35:06Z | 2014-06-19T10:35:39Z |
BUG: dont' always coerce reductions in a groupby always to datetimes | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 08061b1d14863..a9b56b6905b6b 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1527,17 +1527,22 @@ def _possibly_convert_objects(values, convert_dates=True,
values, convert_datetime=convert_dates)
# convert to numeric
- if convert_numeric and values.dtype == np.object_:
- try:
- new_values = lib.maybe_convert_numeric(
- values, set(), coerce_numeric=True)
+ if values.dtype == np.object_:
+ if convert_numeric:
+ try:
+ new_values = lib.maybe_convert_numeric(
+ values, set(), coerce_numeric=True)
- # if we are all nans then leave me alone
- if not isnull(new_values).all():
- values = new_values
+ # if we are all nans then leave me alone
+ if not isnull(new_values).all():
+ values = new_values
- except:
- pass
+ except:
+ pass
+ else:
+
+ # soft-conversion
+ values = lib.maybe_convert_objects(values)
return values
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 182f75e53ca5d..fb9b5e7831c88 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -22,6 +22,7 @@
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype)
+from pandas import _np_version_under1p7
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.algos as _algos
@@ -2243,16 +2244,19 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
try:
if self.axis == 0:
- stacked_values = np.vstack([np.asarray(x)
- for x in values])
- columns = v.index
- index = key_index
+ # normally use vstack as its faster than concat
+ # and if we have mi-columns
+ if not _np_version_under1p7 or isinstance(v.index,MultiIndex):
+ stacked_values = np.vstack([np.asarray(x) for x in values])
+ result = DataFrame(stacked_values,index=key_index,columns=v.index)
+ else:
+ # GH5788 instead of stacking; concat gets the dtypes correct
+ from pandas.tools.merge import concat
+ result = concat(values,keys=key_index,names=key_index.names,
+ axis=self.axis).unstack()
else:
- stacked_values = np.vstack([np.asarray(x)
- for x in values]).T
-
- index = v.index
- columns = key_index
+ stacked_values = np.vstack([np.asarray(x) for x in values])
+ result = DataFrame(stacked_values.T,index=v.index,columns=key_index)
except (ValueError, AttributeError):
# GH1738: values is list of arrays of unequal lengths fall
@@ -2261,15 +2265,14 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
- cd = True
- if self.obj.ndim == 2 and self.obj.dtypes.isin(_DATELIKE_DTYPES).any():
- cd = 'coerce'
- return DataFrame(stacked_values, index=index,
- columns=columns).convert_objects(convert_dates=cd, convert_numeric=True)
+ cd = 'coerce' if self.obj.ndim == 2 and self.obj.dtypes.isin(_DATELIKE_DTYPES).any() else True
+ return result.convert_objects(convert_dates=cd)
else:
- return Series(values, index=key_index).convert_objects(
- convert_dates='coerce',convert_numeric=True)
+ # only coerce dates if we find at least 1 datetime
+ cd = 'coerce' if any([ isinstance(v,Timestamp) for v in values ]) else False
+ return Series(values, index=key_index).convert_objects(convert_dates=cd)
+
else:
# Handle cases like BinGrouper
return self._concat_objects(keys, values,
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 4e657ca343c12..e76cf69eb420b 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -3556,12 +3556,14 @@ def _consolidate_inplace(self):
pass
-def construction_error(tot_items, block_shape, axes):
+def construction_error(tot_items, block_shape, axes, e=None):
""" raise a helpful message about our construction """
- raise ValueError("Shape of passed values is %s, indices imply %s" % (
- tuple(map(int, [tot_items] + list(block_shape))),
- tuple(map(int, [len(ax) for ax in axes]))))
-
+ passed = tuple(map(int, [tot_items] + list(block_shape)))
+ implied = tuple(map(int, [len(ax) for ax in axes]))
+ if passed == implied and e is not None:
+ raise e
+ raise ValueError("Shape of passed values is {0}, indices imply {1}".format(
+ passed,implied))
def create_block_manager_from_blocks(blocks, axes):
try:
@@ -3576,10 +3578,10 @@ def create_block_manager_from_blocks(blocks, axes):
mgr._consolidate_inplace()
return mgr
- except (ValueError):
+ except (ValueError) as e:
blocks = [getattr(b, 'values', b) for b in blocks]
tot_items = sum(b.shape[0] for b in blocks)
- construction_error(tot_items, blocks[0].shape[1:], axes)
+ construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(arrays, names, axes):
@@ -3588,8 +3590,8 @@ def create_block_manager_from_arrays(arrays, names, axes):
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
- except (ValueError):
- construction_error(len(arrays), arrays[0].shape[1:], axes)
+ except (ValueError) as e:
+ construction_error(len(arrays), arrays[0].shape[1:], axes, e)
def maybe_create_block_in_items_map(im, block):
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 7e54aa4e0813f..9d7e90e5f8f32 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -28,7 +28,7 @@
import pandas.core.nanops as nanops
import pandas.util.testing as tm
-
+import pandas as pd
def commonSetUp(self):
self.dateRange = bdate_range('1/1/2005', periods=250)
@@ -481,6 +481,36 @@ def test_apply_describe_bug(self):
grouped = self.mframe.groupby(level='first')
result = grouped.describe() # it works!
+ def test_apply_issues(self):
+ # GH 5788
+
+ s="""2011.05.16,00:00,1.40893
+2011.05.16,01:00,1.40760
+2011.05.16,02:00,1.40750
+2011.05.16,03:00,1.40649
+2011.05.17,02:00,1.40893
+2011.05.17,03:00,1.40760
+2011.05.17,04:00,1.40750
+2011.05.17,05:00,1.40649
+2011.05.18,02:00,1.40893
+2011.05.18,03:00,1.40760
+2011.05.18,04:00,1.40750
+2011.05.18,05:00,1.40649"""
+
+ df = pd.read_csv(StringIO(s), header=None, names=['date', 'time', 'value'], parse_dates=[['date', 'time']])
+ df = df.set_index('date_time')
+
+ expected = df.groupby(df.index.date).idxmax()
+ result = df.groupby(df.index.date).apply(lambda x: x.idxmax())
+ assert_frame_equal(result,expected)
+
+ # GH 5789
+ # don't auto coerce dates
+ df = pd.read_csv(StringIO(s), header=None, names=['date', 'time', 'value'])
+ expected = Series(['00:00','02:00','02:00'],index=['2011.05.16','2011.05.17','2011.05.18'])
+ result = df.groupby('date').apply(lambda x: x['time'][x['value'].idxmax()])
+ assert_series_equal(result,expected)
+
def test_len(self):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year,
| only when we have actual Timestamps in the data (GH5788,GH5789)
closes #5789
TST: tests for idxmax used in an apply
closes #5788
| https://api.github.com/repos/pandas-dev/pandas/pulls/5790 | 2013-12-29T03:59:24Z | 2013-12-29T16:23:22Z | 2013-12-29T16:23:22Z | 2020-01-19T23:27:51Z |
Add idxmax/idxmin to groupby dispatch whitelist (#5786) | diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index e8a9d6e49a066..182f75e53ca5d 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -65,6 +65,7 @@
'mad',
'any', 'all',
'irow', 'take',
+ 'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 942efdfc23740..7e54aa4e0813f 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -3264,6 +3264,7 @@ def test_groupby_whitelist(self):
'mad',
'any', 'all',
'irow', 'take',
+ 'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
@@ -3284,6 +3285,7 @@ def test_groupby_whitelist(self):
'mad',
'any', 'all',
'irow', 'take',
+ 'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
@@ -3413,7 +3415,7 @@ def test_tab_completion(self):
'resample', 'cummin', 'fillna', 'cumsum', 'cumcount',
'all', 'shift', 'skew', 'bfill', 'irow', 'ffill',
'take', 'tshift', 'pct_change', 'any', 'mad', 'corr', 'corrwith',
- 'cov', 'dtypes', 'diff',
+ 'cov', 'dtypes', 'diff', 'idxmax', 'idxmin'
])
self.assertEqual(results, expected)
| Closes #5786.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5787 | 2013-12-28T23:25:08Z | 2013-12-28T23:49:11Z | 2013-12-28T23:49:11Z | 2014-06-21T19:00:13Z |
BLD: print_versions get uname() via cross-platform API | diff --git a/pandas/util/print_versions.py b/pandas/util/print_versions.py
index 433b51b9c0e1e..c40366ec2d804 100644
--- a/pandas/util/print_versions.py
+++ b/pandas/util/print_versions.py
@@ -1,4 +1,5 @@
import os
+import platform
import sys
@@ -8,8 +9,12 @@ def show_versions():
print("Python: %d.%d.%d.%s.%s" % sys.version_info[:])
try:
- sysname, nodename, release, version, machine = os.uname()
- print("OS: %s %s %s %s" % (sysname, release, version, machine))
+ sysname, nodename, release, version, machine, processor = platform.uname()
+ print("OS: %s" % (sysname))
+ print("Release: %s" % (release))
+ #print("Version: %s" % (version))
+ #print("Machine: %s" % (machine))
+ print("Processor: %s" % (processor))
print("byteorder: %s" % sys.byteorder)
print("LC_ALL: %s" % os.environ.get('LC_ALL', "None"))
print("LANG: %s" % os.environ.get('LANG', "None"))
| @jtratner, guess it could fail after all. Windows doesn't support os.uname().
| https://api.github.com/repos/pandas-dev/pandas/pulls/5784 | 2013-12-28T04:19:45Z | 2013-12-28T04:19:58Z | 2013-12-28T04:19:58Z | 2014-06-27T23:28:24Z |
TST: ensure_clean skips test when fs doesn't support unicode (sparc) | diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 0db221d224b45..85353a4a90f7b 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -354,6 +354,10 @@ def ensure_clean(filename=None, return_filelike=False):
try:
fd, filename = tempfile.mkstemp(suffix=filename)
+ except UnicodeEncodeError:
+ raise nose.SkipTest('no unicode file names on this system')
+
+ try:
yield filename
finally:
try:
| http://nipy.bic.berkeley.edu/builders/pandas-py2.x-sid-sparc/builds/365/steps/shell_4/logs/stdio
These checks were removed from the tests during a refactor to use ensure_clean
| https://api.github.com/repos/pandas-dev/pandas/pulls/5783 | 2013-12-28T04:16:49Z | 2013-12-28T04:17:46Z | 2013-12-28T04:17:46Z | 2014-07-16T08:44:30Z |
TST: close sparc test failures | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 7b652c36ae47d..08061b1d14863 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -40,14 +40,17 @@ class AmbiguousIndexError(PandasError, KeyError):
_POSSIBLY_CAST_DTYPES = set([np.dtype(t)
- for t in ['M8[ns]', 'm8[ns]', 'O', 'int8',
+ for t in ['M8[ns]', '>M8[ns]', '<M8[ns]',
+ 'm8[ns]', '>m8[ns]', '<m8[ns]',
+ 'O', 'int8',
'uint8', 'int16', 'uint16', 'int32',
'uint32', 'int64', 'uint64']])
_NS_DTYPE = np.dtype('M8[ns]')
_TD_DTYPE = np.dtype('m8[ns]')
_INT64_DTYPE = np.dtype(np.int64)
-_DATELIKE_DTYPES = set([np.dtype(t) for t in ['M8[ns]', 'm8[ns]']])
+_DATELIKE_DTYPES = set([np.dtype(t) for t in ['M8[ns]', '<M8[ns]', '>M8[ns]',
+ 'm8[ns]', '<m8[ns]', '>m8[ns]']])
# define abstract base classes to enable isinstance type checking on our
@@ -1572,11 +1575,17 @@ def _possibly_cast_to_datetime(value, dtype, coerce=False):
# force the dtype if needed
if is_datetime64 and dtype != _NS_DTYPE:
- raise TypeError(
- "cannot convert datetimelike to dtype [%s]" % dtype)
+ if dtype.name == 'datetime64[ns]':
+ dtype = _NS_DTYPE
+ else:
+ raise TypeError(
+ "cannot convert datetimelike to dtype [%s]" % dtype)
elif is_timedelta64 and dtype != _TD_DTYPE:
- raise TypeError(
- "cannot convert timedeltalike to dtype [%s]" % dtype)
+ if dtype.name == 'timedelta64[ns]':
+ dtype = _TD_DTYPE
+ else:
+ raise TypeError(
+ "cannot convert timedeltalike to dtype [%s]" % dtype)
if np.isscalar(value):
if value == tslib.iNaT or isnull(value):
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 6ec08fe501bcd..4e657ca343c12 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1245,8 +1245,10 @@ def _try_operate(self, values):
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
if isinstance(result, np.ndarray):
+ mask = isnull(result)
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('m8[ns]')
+ result[mask] = tslib.iNaT
elif isinstance(result, np.integer):
result = np.timedelta64(result)
return result
diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py
index f75cf7ebb18d1..5a842adb561b1 100644
--- a/pandas/io/tests/test_stata.py
+++ b/pandas/io/tests/test_stata.py
@@ -14,6 +14,10 @@
from pandas.util.misc import is_little_endian
from pandas import compat
+def skip_if_not_little_endian():
+ if not is_little_endian():
+ raise nose.SkipTest("known failure of test on non-little endian")
+
class TestStata(tm.TestCase):
def setUp(self):
@@ -145,9 +149,7 @@ def test_read_dta4(self):
tm.assert_frame_equal(parsed_13, expected)
def test_read_write_dta5(self):
- if not is_little_endian():
- raise nose.SkipTest("known failure of test_write_dta5 on "
- "non-little endian")
+ skip_if_not_little_endian()
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
@@ -161,9 +163,7 @@ def test_read_write_dta5(self):
original)
def test_write_dta6(self):
- if not is_little_endian():
- raise nose.SkipTest("known failure of test_write_dta6 on "
- "non-little endian")
+ skip_if_not_little_endian()
original = self.read_csv(self.csv3)
original.index.name = 'index'
@@ -193,9 +193,7 @@ def test_read_dta9(self):
tm.assert_frame_equal(parsed, expected)
def test_read_write_dta10(self):
- if not is_little_endian():
- raise nose.SkipTest("known failure of test_write_dta10 on "
- "non-little endian")
+ skip_if_not_little_endian()
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
@@ -232,6 +230,8 @@ def test_encoding(self):
self.assert_(isinstance(result, unicode))
def test_read_write_dta11(self):
+ skip_if_not_little_endian()
+
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
@@ -248,6 +248,8 @@ def test_read_write_dta11(self):
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
+ skip_if_not_little_endian()
+
original = DataFrame([(1, 2, 3, 4)],
columns=['astringwithmorethan32characters_1', 'astringwithmorethan32characters_2', '+', '-'])
formatted = DataFrame([(1, 2, 3, 4)],
| TST: closes #5778, failing tests on non-little endian for stata (spac)
TST: closes #5779, big endian compensation for datelike dtypes
| https://api.github.com/repos/pandas-dev/pandas/pulls/5780 | 2013-12-27T17:57:54Z | 2013-12-27T20:08:13Z | 2013-12-27T20:08:13Z | 2014-07-16T08:44:26Z |
BUG: setitem for iloc/loc with a slice on a Series (GH5771) | diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 8444c7a9b2a00..bfddd2e78c322 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -420,7 +420,10 @@ def can_do_equal_len():
self.obj._maybe_update_cacher(clear=True)
def _align_series(self, indexer, ser):
- # indexer to assign Series can be tuple or scalar
+ # indexer to assign Series can be tuple, slice, scalar
+ if isinstance(indexer, slice):
+ indexer = tuple([indexer])
+
if isinstance(indexer, tuple):
aligners = [not _is_null_slice(idx) for idx in indexer]
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index f4e203444acfc..fe3aac0e9eeaa 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -456,6 +456,20 @@ def test_iloc_setitem(self):
result = df.iloc[:,2:3]
assert_frame_equal(result, expected)
+ # GH5771
+ s = Series(0,index=[4,5,6])
+ s.iloc[1:2] += 1
+ expected = Series([0,1,0],index=[4,5,6])
+ assert_series_equal(s, expected)
+
+ def test_loc_setitem(self):
+ # GH 5771
+ # loc with slice and series
+ s = Series(0,index=[4,5,6])
+ s.loc[4:5] += 1
+ expected = Series([1,1,0],index=[4,5,6])
+ assert_series_equal(s, expected)
+
def test_loc_getitem_int(self):
# int label
| closes #5771
| https://api.github.com/repos/pandas-dev/pandas/pulls/5772 | 2013-12-25T12:54:50Z | 2013-12-25T13:07:52Z | 2013-12-25T13:07:52Z | 2014-06-27T23:15:08Z |
BUG: regression in read_csv parser handling of usecols GH5766 | diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 93a26b70a019e..484c0c89fe72d 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -2049,6 +2049,16 @@ def test_usecols(self):
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
+
+ # 5766
+ result = self.read_csv(StringIO(data), names=['a', 'b'],
+ header=None, usecols=[0, 1])
+
+ expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
+ header=None)
+ expected = expected[['a', 'b']]
+ tm.assert_frame_equal(result, expected)
+
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
diff --git a/pandas/parser.pyx b/pandas/parser.pyx
index 36b4b91023a73..bb93097debf71 100644
--- a/pandas/parser.pyx
+++ b/pandas/parser.pyx
@@ -898,6 +898,9 @@ cdef class TextReader:
if i < self.leading_cols:
# Pass through leading columns always
name = i
+ elif self.usecols and nused == len(self.usecols):
+ # Once we've gathered all requested columns, stop. GH5766
+ break
else:
name = self._get_column_name(i, nused)
if self.has_usecols and not (i in self.usecols or
| https://api.github.com/repos/pandas-dev/pandas/pulls/5770 | 2013-12-25T01:58:46Z | 2013-12-25T01:58:59Z | 2013-12-25T01:58:59Z | 2014-06-29T15:36:16Z | |
DOC: Flesh out the R comparison section of docs (GH3980) | diff --git a/doc/source/comparison_with_r.rst b/doc/source/comparison_with_r.rst
index c05ec01df6bcc..9aedb801250d7 100644
--- a/doc/source/comparison_with_r.rst
+++ b/doc/source/comparison_with_r.rst
@@ -4,7 +4,8 @@
.. ipython:: python
:suppress:
- from pandas import *
+ import pandas as pd
+ import numpy as np
options.display.max_rows=15
Comparison with R / R libraries
@@ -38,25 +39,25 @@ The :meth:`~pandas.DataFrame.query` method is similar to the base R ``subset``
function. In R you might want to get the rows of a ``data.frame`` where one
column's values are less than another column's values:
- .. code-block:: r
+.. code-block:: r
- df <- data.frame(a=rnorm(10), b=rnorm(10))
- subset(df, a <= b)
- df[df$a <= df$b,] # note the comma
+ df <- data.frame(a=rnorm(10), b=rnorm(10))
+ subset(df, a <= b)
+ df[df$a <= df$b,] # note the comma
In ``pandas``, there are a few ways to perform subsetting. You can use
:meth:`~pandas.DataFrame.query` or pass an expression as if it were an
index/slice as well as standard boolean indexing:
- .. ipython:: python
+.. ipython:: python
- from pandas import DataFrame
- from numpy.random import randn
+ from pandas import DataFrame
+ from numpy import random
- df = DataFrame({'a': randn(10), 'b': randn(10)})
- df.query('a <= b')
- df[df.a <= df.b]
- df.loc[df.a <= df.b]
+ df = DataFrame({'a': random.randn(10), 'b': random.randn(10)})
+ df.query('a <= b')
+ df[df.a <= df.b]
+ df.loc[df.a <= df.b]
For more details and examples see :ref:`the query documentation
<indexing.query>`.
@@ -70,20 +71,20 @@ For more details and examples see :ref:`the query documentation
An expression using a data.frame called ``df`` in R with the columns ``a`` and
``b`` would be evaluated using ``with`` like so:
- .. code-block:: r
+.. code-block:: r
- df <- data.frame(a=rnorm(10), b=rnorm(10))
- with(df, a + b)
- df$a + df$b # same as the previous expression
+ df <- data.frame(a=rnorm(10), b=rnorm(10))
+ with(df, a + b)
+ df$a + df$b # same as the previous expression
In ``pandas`` the equivalent expression, using the
:meth:`~pandas.DataFrame.eval` method, would be:
- .. ipython:: python
+.. ipython:: python
- df = DataFrame({'a': randn(10), 'b': randn(10)})
- df.eval('a + b')
- df.a + df.b # same as the previous expression
+ df = DataFrame({'a': random.randn(10), 'b': random.randn(10)})
+ df.eval('a + b')
+ df.a + df.b # same as the previous expression
In certain cases :meth:`~pandas.DataFrame.eval` will be much faster than
evaluation in pure Python. For more details and examples see :ref:`the eval
@@ -98,12 +99,194 @@ xts
plyr
----
+``plyr`` is an R library for the split-apply-combine strategy for data
+analysis. The functions revolve around three data structures in R, ``a``
+for ``arrays``, ``l`` for ``lists``, and ``d`` for ``data.frame``. The
+table below shows how these data structures could be mapped in Python.
+
++------------+-------------------------------+
+| R | Python |
++============+===============================+
+| array | list |
++------------+-------------------------------+
+| lists | dictionary or list of objects |
++------------+-------------------------------+
+| data.frame | dataframe |
++------------+-------------------------------+
+
+|ddply|_
+~~~~~~~~
+
+An expression using a data.frame called ``df`` in R where you want to
+summarize ``x`` by ``month``:
+
+
+
+.. code-block:: r
+
+ require(plyr)
+ df <- data.frame(
+ x = runif(120, 1, 168),
+ y = runif(120, 7, 334),
+ z = runif(120, 1.7, 20.7),
+ month = rep(c(5,6,7,8),30),
+ week = sample(1:4, 120, TRUE)
+ )
+
+ ddply(df, .(month, week), summarize,
+ mean = round(mean(x), 2),
+ sd = round(sd(x), 2))
+
+In ``pandas`` the equivalent expression, using the
+:meth:`~pandas.DataFrame.groupby` method, would be:
+
+
+
+.. ipython:: python
+
+ df = DataFrame({
+ 'x': random.uniform(1., 168., 120),
+ 'y': random.uniform(7., 334., 120),
+ 'z': random.uniform(1.7, 20.7, 120),
+ 'month': [5,6,7,8]*30,
+ 'week': random.randint(1,4, 120)
+ })
+
+ grouped = df.groupby(['month','week'])
+ print grouped['x'].agg([np.mean, np.std])
+
+
+For more details and examples see :ref:`the groupby documentation
+<groupby.aggregate>`.
+
reshape / reshape2
------------------
+|meltarray|_
+~~~~~~~~~~~~~
+
+An expression using a 3 dimensional array called ``a`` in R where you want to
+melt it into a data.frame:
+
+.. code-block:: r
+
+ a <- array(c(1:23, NA), c(2,3,4))
+ data.frame(melt(a))
+
+In Python, since ``a`` is a list, you can simply use list comprehension.
+
+.. ipython:: python
+
+ a = np.array(range(1,24)+[np.NAN]).reshape(2,3,4)
+ DataFrame([tuple(list(x)+[val]) for x, val in np.ndenumerate(a)])
+
+|meltlist|_
+~~~~~~~~~~~~
+
+An expression using a list called ``a`` in R where you want to melt it
+into a data.frame:
+
+.. code-block:: r
+
+ a <- as.list(c(1:4, NA))
+ data.frame(melt(a))
+
+In Python, this list would be a list of tuples, so
+:meth:`~pandas.DataFrame` method would convert it to a dataframe as required.
+
+.. ipython:: python
+
+ a = list(enumerate(range(1,5)+[np.NAN]))
+ DataFrame(a)
+
+For more details and examples see :ref:`the Into to Data Structures
+documentation <basics.dataframe.from_items>`.
+
+|meltdf|_
+~~~~~~~~~~~~~~~~
+
+An expression using a data.frame called ``cheese`` in R where you want to
+reshape the data.frame:
+
+.. code-block:: r
+
+ cheese <- data.frame(
+ first = c('John', 'Mary'),
+ last = c('Doe', 'Bo'),
+ height = c(5.5, 6.0),
+ weight = c(130, 150)
+ )
+ melt(cheese, id=c("first", "last"))
+
+In Python, the :meth:`~pandas.melt` method is the R equivalent:
+
+.. ipython:: python
+
+ cheese = DataFrame({'first' : ['John', 'Mary'],
+ 'last' : ['Doe', 'Bo'],
+ 'height' : [5.5, 6.0],
+ 'weight' : [130, 150]})
+ pd.melt(cheese, id_vars=['first', 'last'])
+ cheese.set_index(['first', 'last']).stack() # alternative way
+
+For more details and examples see :ref:`the reshaping documentation
+<reshaping.melt>`.
+
+|cast|_
+~~~~~~~
+
+An expression using a data.frame called ``df`` in R to cast into a higher
+dimensional array:
+
+.. code-block:: r
+
+ df <- data.frame(
+ x = runif(12, 1, 168),
+ y = runif(12, 7, 334),
+ z = runif(12, 1.7, 20.7),
+ month = rep(c(5,6,7),4),
+ week = rep(c(1,2), 6)
+ )
+
+ mdf <- melt(df, id=c("month", "week"))
+ acast(mdf, week ~ month ~ variable, mean)
+
+In Python the best way is to make use of :meth:`~pandas.pivot_table`:
+
+.. ipython:: python
+
+ df = DataFrame({
+ 'x': random.uniform(1., 168., 12),
+ 'y': random.uniform(7., 334., 12),
+ 'z': random.uniform(1.7, 20.7, 12),
+ 'month': [5,6,7]*4,
+ 'week': [1,2]*6
+ })
+ mdf = pd.melt(df, id_vars=['month', 'week'])
+ pd.pivot_table(mdf, values='value', rows=['variable','week'],
+ cols=['month'], aggfunc=np.mean)
+
+For more details and examples see :ref:`the reshaping documentation
+<reshaping.pivot>`.
.. |with| replace:: ``with``
.. _with: http://finzi.psych.upenn.edu/R/library/base/html/with.html
.. |subset| replace:: ``subset``
.. _subset: http://finzi.psych.upenn.edu/R/library/base/html/subset.html
+
+.. |ddply| replace:: ``ddply``
+.. _ddply: http://www.inside-r.org/packages/cran/plyr/docs/ddply
+
+.. |meltarray| replace:: ``melt.array``
+.. _meltarray: http://www.inside-r.org/packages/cran/reshape2/docs/melt.array
+
+.. |meltlist| replace:: ``melt.list``
+.. meltlist: http://www.inside-r.org/packages/cran/reshape2/docs/melt.list
+
+.. |meltdf| replace:: ``melt.data.frame``
+.. meltdf: http://www.inside-r.org/packages/cran/reshape2/docs/melt.data.frame
+
+.. |cast| replace:: ``cast``
+.. cast: http://www.inside-r.org/packages/cran/reshape2/docs/cast
+
| Some additions to #3980.
I've done some more comparisons to the plyr functions located [here](http://nbviewer.ipython.org/gist/chappers/8066230/). More specifically [aaply](http://nbviewer.ipython.org/gist/chappers/8066230/aaply.ipynb), [alply](http://nbviewer.ipython.org/gist/chappers/8066230/alply.ipynb) don't really have much to do with pandas (its just list comprehension). and [dlply](http://nbviewer.ipython.org/gist/chappers/8066230/dlplyr.ipynb) is probably more suited to be in statsmodel example rather than pandas.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5761 | 2013-12-21T10:04:08Z | 2013-12-28T15:55:45Z | 2013-12-28T15:55:45Z | 2014-06-19T00:03:47Z |
Update rolling skew & kurtosis to handle cases where they aren't defined | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 8de8929c5fa7a..0666eb7f88675 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -111,6 +111,7 @@ Bug Fixes
- Bug in ``pd.read_msgpack`` with inferring a ``DateTimeIndex`` frequencey
incorrectly (:issue:`5947`)
- Fixed ``to_datetime`` for array with both Tz-aware datetimes and ``NaT``s (:issue:`5961`)
+ - Bug in rolling skew/kurtosis when passed a Series with bad data (:issue:`5749`)
pandas 0.13.0
-------------
diff --git a/pandas/algos.pyx b/pandas/algos.pyx
index 08ec707b0d96d..d916de32b7cd3 100644
--- a/pandas/algos.pyx
+++ b/pandas/algos.pyx
@@ -1167,8 +1167,11 @@ def roll_skew(ndarray[double_t] input, int win, int minp):
R = sqrt(B)
- output[i] = ((sqrt(nobs * (nobs - 1.)) * C) /
- ((nobs-2) * R * R * R))
+ if B == 0 or nobs < 3:
+ output[i] = NaN
+ else:
+ output[i] = ((sqrt(nobs * (nobs - 1.)) * C) /
+ ((nobs-2) * R * R * R))
else:
output[i] = NaN
@@ -1236,10 +1239,15 @@ def roll_kurt(ndarray[double_t] input,
R = R * A
D = xxxx / nobs - R - 6*B*A*A - 4*C*A
- K = (nobs * nobs - 1.)*D/(B*B) - 3*((nobs-1.)**2)
- K = K / ((nobs - 2.)*(nobs-3.))
+ if B == 0 or nobs < 4:
+ output[i] = NaN
+
+ else:
+ K = (nobs * nobs - 1.)*D/(B*B) - 3*((nobs-1.)**2)
+ K = K / ((nobs - 2.)*(nobs-3.))
+
+ output[i] = K
- output[i] = K
else:
output[i] = NaN
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index 7381d4c1ae0b4..970adeace1e0f 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -741,6 +741,50 @@ def test_expanding_corr_pairwise(self):
for i in result.items:
assert_almost_equal(result[i], rolling_result[i])
+ def test_rolling_skew_edge_cases(self):
+
+ all_nan = Series([np.NaN] * 5)
+
+ # yields all NaN (0 variance)
+ d = Series([1] * 5)
+ x = mom.rolling_skew(d, window=5)
+ assert_series_equal(all_nan, x)
+
+ # yields all NaN (window too small)
+ d = Series(np.random.randn(5))
+ x = mom.rolling_skew(d, window=2)
+ assert_series_equal(all_nan, x)
+
+ # yields [NaN, NaN, NaN, 0.177994, 1.548824]
+ d = Series([-1.50837035, -0.1297039 , 0.19501095,
+ 1.73508164, 0.41941401])
+ expected = Series([np.NaN, np.NaN, np.NaN,
+ 0.177994, 1.548824])
+ x = mom.rolling_skew(d, window=4)
+ assert_series_equal(expected, x)
+
+ def test_rolling_kurt_edge_cases(self):
+
+ all_nan = Series([np.NaN] * 5)
+
+ # yields all NaN (0 variance)
+ d = Series([1] * 5)
+ x = mom.rolling_kurt(d, window=5)
+ assert_series_equal(all_nan, x)
+
+ # yields all NaN (window too small)
+ d = Series(np.random.randn(5))
+ x = mom.rolling_kurt(d, window=3)
+ assert_series_equal(all_nan, x)
+
+ # yields [NaN, NaN, NaN, 1.224307, 2.671499]
+ d = Series([-1.50837035, -0.1297039 , 0.19501095,
+ 1.73508164, 0.41941401])
+ expected = Series([np.NaN, np.NaN, np.NaN,
+ 1.224307, 2.671499])
+ x = mom.rolling_kurt(d, window=4)
+ assert_series_equal(expected, x)
+
def _check_expanding_ndarray(self, func, static_comp, has_min_periods=True,
has_time_rule=True, preserve_nan=True):
result = func(self.arr)
| closes #5749
The rolling skewness and kurtosis in algos.pyx were modified to match
the testing logic in pandas/core/nanops.py. They now both return NaN
where they are not defined, which occurs where there are either too
few observations or where the variance is zero.
A set of tests was added to verify that Nan is returned in these cases
and that the computations continue to work correctly when the values
are defined.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5760 | 2013-12-20T19:35:08Z | 2014-01-16T14:27:39Z | 2014-01-16T14:27:39Z | 2014-06-20T06:05:10Z |
API: Series.ravel compat with ndarray | diff --git a/pandas/core/series.py b/pandas/core/series.py
index c41cdc89b7bb1..f147eb87d7480 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -317,8 +317,8 @@ def ndim(self):
def base(self):
return self.values.base
- def ravel(self):
- return self.values.ravel()
+ def ravel(self, order='C'):
+ return self.values.ravel(order=order)
def transpose(self):
""" support for compatiblity """
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 21f94f0c5d9e1..16e3368a2710d 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -3253,6 +3253,10 @@ def f(x):
expected = Series(1,index=range(10),dtype='float64')
#assert_series_equal(result,expected)
+ # ravel
+ s = Series(np.random.randn(10))
+ tm.assert_almost_equal(s.ravel(order='F'),s.values.ravel(order='F'))
+
def test_complexx(self):
# GH4819
| related #5698
| https://api.github.com/repos/pandas-dev/pandas/pulls/5759 | 2013-12-20T14:48:20Z | 2013-12-20T15:10:26Z | 2013-12-20T15:10:26Z | 2014-06-25T21:40:52Z |
BUG: empty Series construction (GH5756), concat issues (GH5754) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 8ac168e18233f..173d03f9be3c8 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -247,7 +247,8 @@ API Changes
(:issue:`4390`)
- allow ``ix/loc`` for Series/DataFrame/Panel to set on any axis even when
the single-key is not currently contained in the index for that axis
- (:issue:`2578`, :issue:`5226`, :issue:`5632`, :issue:`5720`, :issue:`5744`)
+ (:issue:`2578`, :issue:`5226`, :issue:`5632`, :issue:`5720`,
+ :issue:`5744`, :issue:`5756`)
- Default export for ``to_clipboard`` is now csv with a sep of `\t` for
compat (:issue:`3368`)
- ``at`` now will enlarge the object inplace (and return the same)
@@ -827,6 +828,7 @@ Bug Fixes
- Bug in fillna with Series and a passed series/dict (:issue:`5703`)
- Bug in groupby transform with a datetime-like grouper (:issue:`5712`)
- Bug in multi-index selection in PY3 when using certain keys (:issue:`5725`)
+ - Row-wise concat of differeing dtypes failing in certain cases (:issue:`5754`)
pandas 0.12.0
-------------
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 8a3869d15c85f..ed6d7fef4dd66 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -325,15 +325,16 @@ def _init_dict(self, data, index, columns, dtype=None):
def _init_ndarray(self, values, index, columns, dtype=None,
copy=False):
if isinstance(values, Series):
- if columns is None and values.name is not None:
- columns = [values.name]
+ if columns is None:
+ if values.name is not None:
+ columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
- if not len(values) and len(columns):
+ if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
values = _prep_ndarray(values, copy=copy)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 3a29fa41046ca..5e00d14a0e0cb 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -6154,6 +6154,48 @@ def test_append_empty_dataframe(self):
expected = df1.copy()
assert_frame_equal(result, expected)
+ def test_append_dtypes(self):
+
+ # GH 5754
+ # row appends of different dtypes (so need to do by-item)
+ # can sometimes infer the correct type
+
+ df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(5))
+ df2 = DataFrame()
+ result = df1.append(df2)
+ expected = df1.copy()
+ assert_frame_equal(result, expected)
+
+ df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
+ df2 = DataFrame({ 'bar' : 'foo' }, index=lrange(1,2))
+ result = df1.append(df2)
+ expected = DataFrame({ 'bar' : [ Timestamp('20130101'), 'foo' ]})
+ assert_frame_equal(result, expected)
+
+ df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
+ df2 = DataFrame({ 'bar' : np.nan }, index=lrange(1,2))
+ result = df1.append(df2)
+ expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), np.nan ],dtype='M8[ns]') })
+ assert_frame_equal(result, expected)
+
+ df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
+ df2 = DataFrame({ 'bar' : np.nan }, index=lrange(1,2), dtype=object)
+ result = df1.append(df2)
+ expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), np.nan ],dtype='M8[ns]') })
+ assert_frame_equal(result, expected)
+
+ df1 = DataFrame({ 'bar' : np.nan }, index=lrange(1))
+ df2 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1,2))
+ result = df1.append(df2)
+ expected = DataFrame({ 'bar' : Series([ np.nan, Timestamp('20130101')] ,dtype='M8[ns]') })
+ assert_frame_equal(result, expected)
+
+ df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
+ df2 = DataFrame({ 'bar' : 1 }, index=lrange(1,2), dtype=object)
+ result = df1.append(df2)
+ expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), 1 ]) })
+ assert_frame_equal(result, expected)
+
def test_asfreq(self):
offset_monthly = self.tsframe.asfreq(datetools.bmonthEnd)
rule_monthly = self.tsframe.asfreq('BM')
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index c7fb209b4aacb..f4e203444acfc 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -1793,6 +1793,14 @@ def f():
expected = DataFrame(columns=['A','B','C'])
assert_frame_equal(result,expected)
+ # GH 5756
+ # setting with empty Series
+ df = DataFrame(Series())
+ assert_frame_equal(df, DataFrame({ 0 : Series() }))
+
+ df = DataFrame(Series(name='foo'))
+ assert_frame_equal(df, DataFrame({ 'foo' : Series() }))
+
def test_cache_updating(self):
# GH 4939, make sure to update the cache on setitem
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index c76bdea950650..dd7ab65869303 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -1139,52 +1139,55 @@ def _concat_blocks(self, blocks):
def _concat_single_item(self, objs, item):
# this is called if we don't have consistent dtypes in a row-wise append
-
all_values = []
- dtypes = set()
+ dtypes = []
+ alls = set()
+ # figure out the resulting dtype of the combination
for data, orig in zip(objs, self.objs):
+ d = dict([ (t,False) for t in ['object','datetime','timedelta','other'] ])
if item in orig:
values = data.get(item)
if hasattr(values,'to_dense'):
values = values.to_dense()
- dtypes.add(values.dtype)
all_values.append(values)
- else:
- all_values.append(None)
- # figure out the resulting dtype of the combination
- alls = set()
- seen = []
- for dtype in dtypes:
- d = dict([ (t,False) for t in ['object','datetime','timedelta','other'] ])
- if issubclass(dtype.type, (np.object_, np.bool_)):
- d['object'] = True
- alls.add('object')
- elif is_datetime64_dtype(dtype):
- d['datetime'] = True
- alls.add('datetime')
- elif is_timedelta64_dtype(dtype):
- d['timedelta'] = True
- alls.add('timedelta')
+ dtype = values.dtype
+
+ if issubclass(dtype.type, (np.object_, np.bool_)):
+ d['object'] = True
+ alls.add('object')
+ elif is_datetime64_dtype(dtype):
+ d['datetime'] = True
+ alls.add('datetime')
+ elif is_timedelta64_dtype(dtype):
+ d['timedelta'] = True
+ alls.add('timedelta')
+ else:
+ d['other'] = True
+ alls.add('other')
+
else:
+ all_values.append(None)
d['other'] = True
alls.add('other')
- seen.append(d)
+
+ dtypes.append(d)
if 'datetime' in alls or 'timedelta' in alls:
if 'object' in alls or 'other' in alls:
- for v, s in zip(all_values,seen):
- if s.get('datetime') or s.get('timedelta'):
+
+ for v, d in zip(all_values,dtypes):
+ if d.get('datetime') or d.get('timedelta'):
pass
# if we have all null, then leave a date/time like type
# if we have only that type left
- elif isnull(v).all():
+ elif v is None or isnull(v).all():
- alls.remove('other')
- alls.remove('object')
+ alls.discard('other')
+ alls.discard('object')
# create the result
if 'object' in alls:
@@ -1200,7 +1203,7 @@ def _concat_single_item(self, objs, item):
to_concat = []
for obj, item_values in zip(objs, all_values):
- if item_values is None:
+ if item_values is None or isnull(item_values).all():
shape = obj.shape[1:]
missing_arr = np.empty(shape, dtype=empty_dtype)
missing_arr.fill(fill_value)
| closes #5756, BUG: construction of DataFrame from empty Series regression
closes #5754, BUG: Row-wise concat of differeing dtypes failing in certain cases
| https://api.github.com/repos/pandas-dev/pandas/pulls/5757 | 2013-12-20T13:06:10Z | 2013-12-20T14:08:05Z | 2013-12-20T14:08:05Z | 2014-07-02T10:56:26Z |
ENH: set display.max_seq_items default != None | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 8ac168e18233f..cb8d745099c1a 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -388,6 +388,8 @@ API Changes
dates are given (:issue:`5242`)
- ``Timestamp`` now supports ``now/today/utcnow`` class methods
(:issue:`5339`)
+ - default for `display.max_seq_len` is now 100 rather then `None`. This activates
+ truncated display ("...") of long sequences in various places. (:issue:`3391`)
- **All** division with ``NDFrame`` - likes is now truedivision, regardless
of the future import. You can use ``//`` and ``floordiv`` to do integer
division.
diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt
index 720150015909e..7c0472fc07de5 100644
--- a/doc/source/v0.13.0.txt
+++ b/doc/source/v0.13.0.txt
@@ -147,6 +147,8 @@ These were announced changes in 0.12 or prior that are taking effect as of 0.13.
- Remove deprecated ``_verbose_info`` (:issue:`3215`)
- Remove deprecated ``read_clipboard/to_clipboard/ExcelFile/ExcelWriter`` from ``pandas.io.parsers`` (:issue:`3717`)
- default for ``tupleize_cols`` is now ``False`` for both ``to_csv`` and ``read_csv``. Fair warning in 0.12 (:issue:`3604`)
+- default for `display.max_seq_len` is now 100 rather then `None`. This activates
+ truncated display ("...") of long sequences in various places. (:issue:`3391`)
Deprecations
~~~~~~~~~~~~
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index b7ec76522b60c..e4d4ea74ac169 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -246,7 +246,7 @@ def mpl_style_cb(key):
validator=is_text)
cf.register_option('expand_frame_repr', True, pc_expand_repr_doc)
cf.register_option('chop_threshold', None, pc_chop_threshold_doc)
- cf.register_option('max_seq_items', None, pc_max_seq_items)
+ cf.register_option('max_seq_items', 100, pc_max_seq_items)
cf.register_option('mpl_style', None, pc_mpl_style_doc,
validator=is_one_of_factory([None, False, 'default']),
cb=mpl_style_cb)
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index f09becb5befb7..f66c59fade2c1 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -141,9 +141,8 @@ def test_repr_chop_threshold(self):
def test_repr_obeys_max_seq_limit(self):
import pandas.core.common as com
- #unlimited
- reset_option("display.max_seq_items")
- self.assertTrue(len(com.pprint_thing(lrange(1000)))> 2000)
+ with option_context("display.max_seq_items",2000):
+ self.assertTrue(len(com.pprint_thing(lrange(1000))) > 1000)
with option_context("display.max_seq_items",5):
self.assertTrue(len(com.pprint_thing(lrange(1000)))< 100)
| closes #3391.
No idea what the default value should be. numpy equivalent default is 1000, but even 100 is too big IMO.
The default is primarily there to mitigate output bombs when displaying some pandas objects.
Still need to update the release notes on all these micro-PRs.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5753 | 2013-12-19T22:54:03Z | 2013-12-20T15:55:34Z | 2013-12-20T15:55:34Z | 2014-06-27T10:14:01Z |
ENH: expose option_context as a top-level API GH5618 | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 46745d94b5f78..adff5a3c74f90 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1457,6 +1457,21 @@ It's also possible to reset multiple options at once (using a regex):
reset_option("^display")
+.. versionadded:: 0.14.0
+
+ Beginning with v0.14.0 the `option_context` context manager has been exposed through
+ the top-level API, allowing you to execute code with given option values. Option values
+ are restored automatically when you exit the `with` block:
+
+.. ipython:: python
+
+ with option_context("display.max_rows",10,"display.max_columns", 5):
+ print get_option("display.max_rows")
+ print get_option("display.max_columns")
+
+ print get_option("display.max_rows")
+ print get_option("display.max_columns")
+
Console Output Formatting
-------------------------
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 7109b87f5352b..d4c9fa07e546f 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -65,6 +65,7 @@ Improvements to existing features
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- perf improvements in Series datetime/timedelta binary operations (:issue:`5801`)
+ - `option_context` context manager now available as top-level API (:issue:`5752`)
Bug Fixes
~~~~~~~~~
diff --git a/pandas/core/api.py b/pandas/core/api.py
index d75c075d22d7c..b36c9f7499df6 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -31,4 +31,4 @@
import pandas.core.datetools as datetools
from pandas.core.config import (get_option, set_option, reset_option,
- describe_option, options)
+ describe_option, option_context, options)
diff --git a/pandas/core/config.py b/pandas/core/config.py
index 4bec029851092..f2f932e39759a 100644
--- a/pandas/core/config.py
+++ b/pandas/core/config.py
@@ -100,54 +100,29 @@ def _get_option(pat, silent=False):
return root[k]
-def _set_single_option(pat, value, silent):
- key = _get_single_key(pat, silent)
-
- o = _get_registered_option(key)
- if o and o.validator:
- o.validator(value)
-
- # walk the nested dict
- root, k = _get_root(key)
- root[k] = value
-
- if o.cb:
- o.cb(key)
-
-
-def _set_multiple_options(args, silent):
- for k, v in zip(args[::2], args[1::2]):
- _set_single_option(k, v, silent)
-
-
def _set_option(*args, **kwargs):
# must at least 1 arg deal with constraints later
nargs = len(args)
if not nargs or nargs % 2 != 0:
- raise AssertionError("Must provide an even number of non-keyword "
+ raise ValueError("Must provide an even number of non-keyword "
"arguments")
- # must be 0 or 1 kwargs
- nkwargs = len(kwargs)
- if nkwargs not in (0, 1):
- raise AssertionError("The can only be 0 or 1 keyword arguments")
+ # default to false
+ silent = kwargs.get('silent', False)
- # if 1 kwarg then it must be silent=True or silent=False
- if nkwargs:
- k, = list(kwargs.keys())
- v, = list(kwargs.values())
+ for k, v in zip(args[::2], args[1::2]):
+ key = _get_single_key(k, silent)
- if k != 'silent':
- raise ValueError("the only allowed keyword argument is 'silent', "
- "you passed '{0}'".format(k))
- if not isinstance(v, bool):
- raise TypeError("the type of the keyword argument passed must be "
- "bool, you passed a {0}".format(v.__class__))
+ o = _get_registered_option(key)
+ if o and o.validator:
+ o.validator(v)
- # default to false
- silent = kwargs.get('silent', False)
- _set_multiple_options(args, silent)
+ # walk the nested dict
+ root, k = _get_root(key)
+ root[k] = v
+ if o.cb:
+ o.cb(key)
def _describe_option(pat='', _print_desc=True):
@@ -365,7 +340,7 @@ class option_context(object):
def __init__(self, *args):
if not (len(args) % 2 == 0 and len(args) >= 2):
- raise AssertionError(
+ raise ValueError(
'Need to invoke as'
'option_context(pat, val, [(pat, val), ...)).'
)
diff --git a/pandas/tests/test_config.py b/pandas/tests/test_config.py
index 80a3fe9be7003..6d4486525f4eb 100644
--- a/pandas/tests/test_config.py
+++ b/pandas/tests/test_config.py
@@ -170,26 +170,13 @@ def test_set_option(self):
def test_set_option_empty_args(self):
- self.assertRaises(AssertionError, self.cf.set_option)
+ self.assertRaises(ValueError, self.cf.set_option)
def test_set_option_uneven_args(self):
- self.assertRaises(AssertionError, self.cf.set_option, 'a.b', 2, 'b.c')
-
-
- def test_set_option_2_kwargs(self):
- self.assertRaises(AssertionError, self.cf.set_option, 'a.b', 2,
- silenadf=2, asdf=2)
-
- def test_set_option_invalid_kwargs_key(self):
- self.assertRaises(ValueError, self.cf.set_option, 'a.b', 2,
- silenadf=2)
-
- def test_set_option_invalid_kwargs_value_type(self):
- self.assertRaises(TypeError, self.cf.set_option, 'a.b', 2,
- silent=2)
+ self.assertRaises(ValueError, self.cf.set_option, 'a.b', 2, 'b.c')
def test_set_option_invalid_single_argument_type(self):
- self.assertRaises(AssertionError, self.cf.set_option, 2)
+ self.assertRaises(ValueError, self.cf.set_option, 2)
def test_set_option_multiple(self):
self.cf.register_option('a', 1, 'doc')
| <del>
Moving `option_context` to toplevel rather then making `set_option` a context manager means
a one line change instead of metaclasses and config_prefix subtlety and the rest of the ich.
When a given approach to something explodes in complexity I'll [bravely, bravely run away](http://www.youtube.com/watch?v=BZwuTo7zKM8) every single time.
</del>
closes #5618
replaces #5625.
@jtratner, your points about config_prefix being broken are valid, feel free to pick up in the future
if you're so inclined.
cc @jseabold
| https://api.github.com/repos/pandas-dev/pandas/pulls/5752 | 2013-12-19T22:42:42Z | 2013-12-31T00:49:22Z | 2013-12-31T00:49:22Z | 2014-06-14T10:26:45Z |
DOC: added missing argument in Series.apply | diff --git a/pandas/core/series.py b/pandas/core/series.py
index ecfd99e61a090..c41cdc89b7bb1 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1993,6 +1993,9 @@ def apply(self, func, convert_dtype=True, args=(), **kwds):
convert_dtype : boolean, default True
Try to find better dtype for elementwise function results. If
False, leave as dtype=object
+ args : tuple
+ Positional arguments to pass to function in addition to the value
+ Additional keyword arguments will be passed as keywords to the function
See also
--------
| https://api.github.com/repos/pandas-dev/pandas/pulls/5750 | 2013-12-19T20:57:33Z | 2013-12-20T01:55:41Z | 2013-12-20T01:55:41Z | 2015-04-25T23:33:13Z | |
API/REGRESS: partial revert of f8b6208675b5b10d73a74f50478fa5e37b43fc02 (GH5720,GH5744) | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 4d6af77880747..8a3869d15c85f 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1916,10 +1916,11 @@ def _ensure_valid_index(self, value):
'Series')
self._data.set_axis(1, value.index.copy(), check_axis=False)
+ # we are a scalar
+ # noop
else:
- raise ValueError('Cannot set a frame with no defined index '
- 'and a value that cannot be converted to a '
- 'Series')
+
+ pass
def _set_item(self, key, value):
"""
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 11bf985bea041..c7fb209b4aacb 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -1778,14 +1778,12 @@ def f():
# don't create rows when empty
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
- def f():
- y['New'] = np.nan
- self.assertRaises(ValueError, f)
+ y['New'] = np.nan
+ assert_frame_equal(y,DataFrame(columns=['A','B','New']))
df = DataFrame(columns=['a', 'b', 'c c'])
- def f():
- df['d'] = 3
- self.assertRaises(ValueError, f)
+ df['d'] = 3
+ assert_frame_equal(df,DataFrame(columns=['a','b','c c','d']))
assert_series_equal(df['c c'],Series(name='c c',dtype=object))
# reindex columns is ok
| ```
allow assignment of a column in a frame with a scalar with no index (so adds to the columns),
instead of raising; this preservers 0.12 behavior
```
related #5720, #5744
going back to 0.12 behavior
effectively can add a column by assigning a scalar to a frame that doesn't have an index
need a more compelling reason to raise here
```
In [4]: df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
In [5]: y = df[df.A > 5]
In [6]: y
Out[6]:
Empty DataFrame
Columns: [A, B]
Index: []
[0 rows x 2 columns]
In [7]: y['New'] = np.nan
In [8]: y
Out[8]:
Empty DataFrame
Columns: [A, B, New]
Index: []
[0 rows x 3 columns]
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/5747 | 2013-12-19T17:32:02Z | 2013-12-19T18:04:07Z | 2013-12-19T18:04:07Z | 2014-06-21T16:49:50Z |
BUG: don't allow an empty dataframe to have scalar assignment succeed (GH5744) | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 3a22de3cb43f3..8ac168e18233f 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -247,7 +247,7 @@ API Changes
(:issue:`4390`)
- allow ``ix/loc`` for Series/DataFrame/Panel to set on any axis even when
the single-key is not currently contained in the index for that axis
- (:issue:`2578`, :issue:`5226`, :issue:`5632`, :issue:`5720`)
+ (:issue:`2578`, :issue:`5226`, :issue:`5632`, :issue:`5720`, :issue:`5744`)
- Default export for ``to_clipboard`` is now csv with a sep of `\t` for
compat (:issue:`3368`)
- ``at`` now will enlarge the object inplace (and return the same)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 2f299488bd321..4d6af77880747 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1916,6 +1916,11 @@ def _ensure_valid_index(self, value):
'Series')
self._data.set_axis(1, value.index.copy(), check_axis=False)
+ else:
+ raise ValueError('Cannot set a frame with no defined index '
+ 'and a value that cannot be converted to a '
+ 'Series')
+
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index e601755ba8aaf..010020630cd18 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -1753,13 +1753,26 @@ def f():
str(df)
assert_frame_equal(df,expected)
- # GH5720
+ # GH5720, GH5744
# don't create rows when empty
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
- y['New'] = np.nan
- expected = DataFrame(columns=['A','B','New'])
- assert_frame_equal(y, expected)
+ def f():
+ y['New'] = np.nan
+ self.assertRaises(ValueError, f)
+
+ df = DataFrame(columns=['a', 'b', 'c c'])
+ def f():
+ df['d'] = 3
+ self.assertRaises(ValueError, f)
+ assert_series_equal(df['c c'],Series(name='c c',dtype=object))
+
+ # reindex columns is ok
+ df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
+ y = df[df.A > 5]
+ result = y.reindex(columns=['A','B','C'])
+ expected = DataFrame(columns=['A','B','C'])
+ assert_frame_equal(result,expected)
def test_cache_updating(self):
# GH 4939, make sure to update the cache on setitem
| closes #5744
related #5720
I think this is the correct behavior
```
In [1]: df = pd.DataFrame(columns=['a', 'b', 'c c'])
In [2]: df['d'] = 3
ValueError: Cannot set a frame with no defined index and a value that cannot be converted to a Series
In [3]: df['c c']
Out[3]: Series([], name: c c, dtype: object)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/5745 | 2013-12-19T14:43:10Z | 2013-12-19T15:20:21Z | 2013-12-19T15:20:21Z | 2014-06-16T20:48:04Z |
BUG: don't lose dtypes when concatenating empty array-likes | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 99b8bfc460068..9650089279f12 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -118,6 +118,7 @@ Bug Fixes
- Bug in rolling skew/kurtosis when passed a Series with bad data (:issue:`5749`)
- Bug in scipy ``interpolate`` methods with a datetime index (:issue:`5975`)
- Bug in NaT comparison if a mixed datetime/np.datetime64 with NaT were passed (:issue:`5968`)
+ - Fixed bug with ``pd.concat`` losing dtype information if all inputs are empty (:issue:`5742`)
pandas 0.13.0
-------------
diff --git a/pandas/core/common.py b/pandas/core/common.py
index e8bcfa71fe32a..cd78f35aabdf9 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -2326,20 +2326,23 @@ def _check_as_is(x):
def _concat_compat(to_concat, axis=0):
# filter empty arrays
- to_concat = [x for x in to_concat if x.shape[axis] > 0]
-
- # return the empty np array, if nothing to concatenate, #3121
- if not to_concat:
- return np.array([], dtype=object)
-
- is_datetime64 = [x.dtype == _NS_DTYPE for x in to_concat]
- if all(is_datetime64):
- # work around NumPy 1.6 bug
- new_values = np.concatenate([x.view(np.int64) for x in to_concat],
- axis=axis)
- return new_values.view(_NS_DTYPE)
- elif any(is_datetime64):
- to_concat = [_to_pydatetime(x) for x in to_concat]
+ nonempty = [x for x in to_concat if x.shape[axis] > 0]
+
+ # If all arrays are empty, there's nothing to convert, just short-cut to
+ # the concatenation, #3121.
+ #
+ # Creating an empty array directly is tempting, but the winnings would be
+ # marginal given that it would still require shape & dtype calculation and
+ # np.concatenate which has them both implemented is compiled.
+ if nonempty:
+ is_datetime64 = [x.dtype == _NS_DTYPE for x in nonempty]
+ if all(is_datetime64):
+ # work around NumPy 1.6 bug
+ new_values = np.concatenate([x.view(np.int64) for x in nonempty],
+ axis=axis)
+ return new_values.view(_NS_DTYPE)
+ elif any(is_datetime64):
+ to_concat = [_to_pydatetime(x) for x in nonempty]
return np.concatenate(to_concat, axis=axis)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index edcf7a0a491b0..3b6e4ba445ce0 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -11909,6 +11909,23 @@ def test_to_csv_date_format(self):
assert_frame_equal(test, nat_frame)
+ def test_concat_empty_dataframe_dtypes(self):
+ df = DataFrame(columns=list("abc"))
+ df['a'] = df['a'].astype(np.bool_)
+ df['b'] = df['b'].astype(np.int32)
+ df['c'] = df['c'].astype(np.float64)
+
+ result = pd.concat([df, df])
+ self.assertEqual(result['a'].dtype, np.bool_)
+ self.assertEqual(result['b'].dtype, np.int32)
+ self.assertEqual(result['c'].dtype, np.float64)
+
+ result = pd.concat([df, df.astype(np.float64)])
+ self.assertEqual(result['a'].dtype, np.object_)
+ self.assertEqual(result['b'].dtype, np.float64)
+ self.assertEqual(result['c'].dtype, np.float64)
+
+
def skip_if_no_ne(engine='numexpr'):
if engine == 'numexpr':
try:
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 70dd38c2641ef..6b4a9a2bc4c22 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -5441,6 +5441,15 @@ def test_numpy_unique(self):
# it works!
result = np.unique(self.ts)
+ def test_concat_empty_series_dtypes(self):
+ self.assertEqual(pd.concat([Series(dtype=np.float64)]).dtype, np.float64)
+ self.assertEqual(pd.concat([Series(dtype=np.int8)]).dtype, np.int8)
+ self.assertEqual(pd.concat([Series(dtype=np.bool_)]).dtype, np.bool_)
+
+ self.assertEqual(pd.concat([Series(dtype=np.bool_),
+ Series(dtype=np.int32)]).dtype, np.int32)
+
+
class TestSeriesNonUnique(tm.TestCase):
| I develop an application that does quite a bit of data manipulation. Being aware of `pandas` being functional-but-not-really-heavily-optimized I use it to maintain label consistency and for grouping/merging data, heavy-duty maths is usually done with `numpy` ufuncs. The application contains entities that have no data at the beginning and receive data over their lifetimes. Every once in a while an incoming data chunk will contain no data for a certain entity. Usually it's fine but if the entity was just created the following happens:
``` python
In [1]: pd.__version__
Out[1]: '0.13.0rc1-92-gf6fd509'
In [2]: data = pd.Series(dtype=np.float)
In [3]: chunk = pd.Series(dtype=np.float)
In [4]: pd.concat([data, chunk])
Out[4]: Series([], dtype: object)
```
After that ufuncs like `isnan` cease to work on `data.values` since its dtype has changed to `object`. This PR fixes it.
| https://api.github.com/repos/pandas-dev/pandas/pulls/5742 | 2013-12-19T06:41:20Z | 2014-01-18T14:20:24Z | 2014-01-18T14:20:24Z | 2014-06-15T06:34:27Z |
BUG: return Series as DataFrame.dtypes/ftypes for empty dataframes | diff --git a/doc/source/release.rst b/doc/source/release.rst
index 8b753abc83ca7..c33c2fe61429a 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -65,6 +65,7 @@ API Changes
- ``select_as_multiple`` will always raise a ``KeyError``, when a key or the selector is not found (:issue:`6177`)
- ``df['col'] = value`` and ``df.loc[:,'col'] = value`` are now completely equivalent;
previously the ``.loc`` would not necessarily coerce the dtype of the resultant series (:issue:`6149`)
+- ``dtypes`` and ``ftypes`` now return a series with ``dtype=object`` on empty containers (:issue:`5740`)
Experimental Features
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c8e1247416806..d607be6bfb733 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1947,7 +1947,8 @@ def get_ftype_counts(self):
def dtypes(self):
""" Return the dtypes in this object """
from pandas import Series
- return Series(self._data.get_dtypes(),index=self._info_axis)
+ return Series(self._data.get_dtypes(), index=self._info_axis,
+ dtype=np.object_)
@property
def ftypes(self):
@@ -1956,7 +1957,8 @@ def ftypes(self):
in this object.
"""
from pandas import Series
- return Series(self._data.get_ftypes(),index=self._info_axis)
+ return Series(self._data.get_ftypes(), index=self._info_axis,
+ dtype=np.object_)
def as_blocks(self, columns=None):
"""
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 6eddd52dba634..f85c95e8b81db 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -12164,6 +12164,47 @@ def test_concat_empty_dataframe_dtypes(self):
self.assertEqual(result['b'].dtype, np.float64)
self.assertEqual(result['c'].dtype, np.float64)
+ def test_empty_frame_dtypes_ftypes(self):
+ empty_df = pd.DataFrame()
+ assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
+ assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
+
+ nocols_df = pd.DataFrame(index=[1,2,3])
+ assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
+ assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
+
+ norows_df = pd.DataFrame(columns=list("abc"))
+ assert_series_equal(norows_df.dtypes, pd.Series(np.object, index=list("abc")))
+ assert_series_equal(norows_df.ftypes, pd.Series('object:dense', index=list("abc")))
+
+ norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
+ assert_series_equal(norows_int_df.dtypes, pd.Series(np.dtype('int32'), index=list("abc")))
+ assert_series_equal(norows_int_df.ftypes, pd.Series('int32:dense', index=list("abc")))
+
+ odict = OrderedDict
+ df = pd.DataFrame(odict([('a', 1), ('b', True), ('c', 1.0)]), index=[1, 2, 3])
+ assert_series_equal(df.dtypes, pd.Series(odict([('a', np.int64),
+ ('b', np.bool),
+ ('c', np.float64)])))
+ assert_series_equal(df.ftypes, pd.Series(odict([('a', 'int64:dense'),
+ ('b', 'bool:dense'),
+ ('c', 'float64:dense')])))
+
+ # same but for empty slice of df
+ assert_series_equal(df[:0].dtypes, pd.Series(odict([('a', np.int),
+ ('b', np.bool),
+ ('c', np.float)])))
+ assert_series_equal(df[:0].ftypes, pd.Series(odict([('a', 'int64:dense'),
+ ('b', 'bool:dense'),
+ ('c', 'float64:dense')])))
+
+def skip_if_no_ne(engine='numexpr'):
+ if engine == 'numexpr':
+ try:
+ import numexpr as ne
+ except ImportError:
+ raise nose.SkipTest("cannot query engine numexpr when numexpr not "
+ "installed")
def skip_if_no_pandas_parser(parser):
| `DataFrame.dtypes` and `DataFrame.ftypes` values were inconsistent for empty dataframes:
``` python
In [2]: pd.DataFrame().dtypes
Out[2]:
Empty DataFrame
Columns: []
Index: []
[0 rows x 0 columns]
In [3]: pd.DataFrame().ftypes
Out[3]:
Empty DataFrame
Columns: []
Index: []
[0 rows x 0 columns]
In [4]: pd.DataFrame(columns=list("abc")).ftypes
Out[4]:
a NaN
b NaN
c NaN
dtype: float64
In [5]: pd.DataFrame(columns=list("abc")).dtypes
Out[5]:
a NaN
b NaN
c NaN
dtype: float64
In [6]: pd.__version__
Out[6]: '0.13.0rc1-92-gf6fd509'
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/5740 | 2013-12-19T05:42:17Z | 2014-02-17T13:59:56Z | 2014-02-17T13:59:56Z | 2014-06-12T17:49:32Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.