title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
DEPR: is_categorical
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 3dd8fd4a38d7e..a8c72ccb666a5 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -269,7 +269,7 @@ Deprecations version 1.1. All other arguments should be given as keyword arguments (:issue:`27573`). -- +- :func:`pandas.api.types.is_categorical` is deprecated and will be removed in a future version; use `:func:pandas.api.types.is_categorical_dtype` instead (:issue:`33385`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 16373bd697c1f..6a9ecfe709847 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -125,7 +125,7 @@ def ensure_categorical(arr): cat_arr : The original array cast as a Categorical. If it already is a Categorical, we return as is. """ - if not is_categorical(arr): + if not is_categorical_dtype(arr.dtype): from pandas import Categorical arr = Categorical(arr) @@ -360,6 +360,12 @@ def is_categorical(arr) -> bool: >>> is_categorical(pd.CategoricalIndex([1, 2, 3])) True """ + warnings.warn( + "is_categorical is deprecated and will be removed in a future version. 
" + "Use is_categorical_dtype instead", + FutureWarning, + stacklevel=2, + ) return isinstance(arr, ABCCategorical) or is_categorical_dtype(arr) @@ -1458,7 +1464,7 @@ def is_extension_type(arr) -> bool: stacklevel=2, ) - if is_categorical(arr): + if is_categorical_dtype(arr): return True elif is_sparse(arr): return True diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 4be5da9c4c54a..8fe2b3c60d6d0 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -337,7 +337,6 @@ def _from_values_or_dtype( >>> pd.CategoricalDtype._from_values_or_dtype(c, dtype=dtype2) CategoricalDtype(categories=['x', 'y'], ordered=False) """ - from pandas.core.dtypes.common import is_categorical if dtype is not None: # The dtype argument takes precedence over values.dtype (if any) @@ -352,7 +351,7 @@ def _from_values_or_dtype( ) elif not isinstance(dtype, CategoricalDtype): raise ValueError(f"Cannot not construct CategoricalDtype from {dtype}") - elif is_categorical(values): + elif cls.is_dtype(values): # If no "dtype" was passed, use the one from "values", but honor # the "ordered" and "categories" arguments dtype = values.dtype._from_categorical_dtype( diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index d5df661efa692..18e995ce4efd7 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -20,7 +20,7 @@ ) from pandas.core.dtypes.common import ( ensure_platform_int, - is_categorical, + is_categorical_dtype, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, @@ -787,7 +787,7 @@ def get_indexer( left_indexer = self.left.get_indexer(target_as_index.left) right_indexer = self.right.get_indexer(target_as_index.right) indexer = np.where(left_indexer == right_indexer, left_indexer, -1) - elif is_categorical(target_as_index): + elif is_categorical_dtype(target_as_index.dtype): # get an indexer for unique categories then propagate to codes via take_1d 
categories_indexer = self.get_indexer(target_as_index.categories) indexer = take_1d(categories_indexer, target_as_index.codes, fill_value=-1) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index ba2fd037901a2..a404c3d11723e 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -30,7 +30,6 @@ DT64NS_DTYPE, TD64NS_DTYPE, is_bool_dtype, - is_categorical, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype, @@ -2764,7 +2763,7 @@ def get_block_type(values, dtype=None): if is_sparse(dtype): # Need this first(ish) so that Sparse[datetime] is sparse cls = ExtensionBlock - elif is_categorical(values): + elif is_categorical_dtype(values.dtype): cls = CategoricalBlock elif issubclass(vtype, np.datetime64): assert not is_datetime64tz_dtype(values) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 47fa0627cd1b4..607a1b75dcfcd 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -6,7 +6,7 @@ import datetime from functools import partial import string -from typing import TYPE_CHECKING, Optional, Tuple, Union, cast +from typing import TYPE_CHECKING, Optional, Tuple, Union import warnings import numpy as np @@ -24,7 +24,6 @@ is_array_like, is_bool, is_bool_dtype, - is_categorical, is_categorical_dtype, is_datetime64tz_dtype, is_dtype_equal, @@ -1936,9 +1935,8 @@ def _factorize_keys( elif ( is_categorical_dtype(lk) and is_categorical_dtype(rk) and is_dtype_equal(lk, rk) ): - assert is_categorical(lk) and is_categorical(rk) - lk = cast(Categorical, lk) - rk = cast(Categorical, rk) + assert isinstance(lk, Categorical) + assert isinstance(rk, Categorical) if lk.categories.equals(rk.categories): # if we exactly match in categories, allow us to factorize on codes rk = rk.codes diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index 66bf696cbe912..6e73e1542bb80 100644 --- a/pandas/tests/dtypes/test_common.py +++ 
b/pandas/tests/dtypes/test_common.py @@ -203,11 +203,18 @@ def test_is_scipy_sparse(): def test_is_categorical(): cat = pd.Categorical([1, 2, 3]) - assert com.is_categorical(cat) - assert com.is_categorical(pd.Series(cat)) - assert com.is_categorical(pd.CategoricalIndex([1, 2, 3])) + with tm.assert_produces_warning(FutureWarning): + assert com.is_categorical(cat) + assert com.is_categorical(pd.Series(cat)) + assert com.is_categorical(pd.CategoricalIndex([1, 2, 3])) + + assert not com.is_categorical([1, 2, 3]) - assert not com.is_categorical([1, 2, 3]) + +def test_is_categorical_deprecation(): + # GH#33385 + with tm.assert_produces_warning(FutureWarning): + com.is_categorical([1, 2, 3]) def test_is_datetime64_dtype(): diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index d0831ea514a64..519f2f3eead1c 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -159,10 +159,12 @@ def test_basic(self, dtype): assert is_categorical_dtype(s) assert not is_categorical_dtype(np.dtype("float64")) - assert is_categorical(s.dtype) - assert is_categorical(s) - assert not is_categorical(np.dtype("float64")) - assert not is_categorical(1.0) + with tm.assert_produces_warning(FutureWarning): + # GH#33385 deprecated + assert is_categorical(s.dtype) + assert is_categorical(s) + assert not is_categorical(np.dtype("float64")) + assert not is_categorical(1.0) def test_tuple_categories(self): categories = [(1, "a"), (2, "b"), (3, "c")]
xref #33377 the current behavior of is_categorical is buggy, besides its redundant with is_categorical_dtype
https://api.github.com/repos/pandas-dev/pandas/pulls/33385
2020-04-08T01:11:15Z
2020-04-08T13:37:29Z
2020-04-08T13:37:29Z
2020-04-08T15:10:30Z
FEATURE: pandas.Series.query() #22347
diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst index ab0540a930396..f70abe9a35323 100644 --- a/doc/source/reference/series.rst +++ b/doc/source/reference/series.rst @@ -73,6 +73,7 @@ Indexing, iteration Series.pop Series.item Series.xs + Series.query For more information on ``.at``, ``.iat``, ``.loc``, and ``.iloc``, see the :ref:`indexing documentation <indexing>`. @@ -148,6 +149,7 @@ Computations / descriptive stats Series.cumsum Series.describe Series.diff + Series.eval Series.factorize Series.kurt Series.mad diff --git a/doc/source/user_guide/enhancingperf.rst b/doc/source/user_guide/enhancingperf.rst index 1d84d05fda079..6cf2bd85bf521 100644 --- a/doc/source/user_guide/enhancingperf.rst +++ b/doc/source/user_guide/enhancingperf.rst @@ -562,6 +562,59 @@ Now let's do the same thing but with comparisons: of type ``bool`` or ``np.bool_``. Again, you should perform these kinds of operations in plain Python. +The ``Series.eval`` method + +A new function added alongside the ``Series.query`` method that allows +you to evaluate an expression in the "context" of a :class:`~pandas.Series`. + +.. ipython:: python + + index = list(range(2010, 2015)) + data = np.random.randn(5) + series = pd.Series(data, index=index) + series.index.name = 'years' + series + + series.eval('(years > 2010) & (years != 2013)') + +There is an ``inplace`` keyword that currently defaults to ``True``. This maybe +change in future versions of pandas and it is recommended to use the ``inplace`` +keyword. + +If a ``series index`` does not have a name you can use "index" to reference the index. + +.. ipython:: python + + index = list(range(2010, 2015)) + data = np.random.randn(5) + series = pd.Series(data, index=index) + series + + series.eval('(index > 2010) & (index != 2013)') + +The function can also be used on a ``series`` with a :class:`~pandas.MultiIndex`. + +.. 
ipython:: python + + data = np.random.randn(10) + foos = np.random.choice(['foo1', 'foo2'], size=10) + years = list(range(2010, 2020)) + + data + foos + years + + index = pd.MultiIndex.from_arrays([foos, years], names=[None, 'years']) + index + + series = pd.Series(data, index=index) + series + + series.eval('ilevel_0 == "foo1" and years > 2013') + +``ilevel_0`` can be used to reference a ``MultiIndex`` by the level. In this case the +0th index did not have a name. + The ``DataFrame.eval`` method ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst index fb815b3a975d1..02812bf36c9a9 100644 --- a/doc/source/user_guide/indexing.rst +++ b/doc/source/user_guide/indexing.rst @@ -1126,7 +1126,81 @@ Mask .. _indexing.query: -The :meth:`~pandas.DataFrame.query` Method +The ``Series`` :meth:`~pandas.Series.query` Method +------------------------------------------ + +:class:`~pandas.Series` objects also have a :meth:`~pandas.Series.query` +method that allows for selection using an expression. Perviously, the +behaviour required the conversion of a ``Series`` object into a ``DataFrame`` in order to use the query method. The differences are +shown below. + +.. ipython:: python + + data = [10, 11, 12, 13, 14, 15] + index = [0, 1, 2, 3, 4, 5] + series = pd.Series(data, index=index) + series + + # previously, converting to DataFrame object + series.to_frame().query('index > 2') + + # using query method for series + series.query('index > 2') + +You can do the same with a named ``index``. + +.. ipython:: python + + data = [10, 11, 12, 13, 14, 15] + index = [0, 1, 2, 3, 4, 5] + series = pd.Series(data, index=index) + series.index.name = 'foo' + + series.query('foo > 2') + +The ``query`` method also has a ``inplace`` keyword that lets you modify +the original ``Series``. It is set to ``False`` by default. + +.. 
ipython:: python + series.query('foo > 2', inplace=True) + +:class:`~pandas.MultiIndex` :meth:`~pandas.Series.query` Syntax +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can use a ``Series`` with a :class:`~pandas.MultiIndex` with the +query method. + +.. ipython:: python + + data = list(range(0, 10)) + foos = np.random.choice(['foo1', 'foo2'], size=10) + bars = np.random.choice(['bar1', 'bar2'], size=10) + + data + foos + bars + + index = pd.MultiIndex.from_arrays([foos, bars], names=['foos', 'bars']) + index + + series = pd.Series(data, index=index) + series + + series.query('foos == "foo1"') + series.query('foos == "foo1" and bars == "bar2"') + +You can use special names to refer to unnamed levels of ``MultiIndex``. +The example below uses ``ilevel_1``, which is a common convention to refer +to the index at the "1st" level. + +.. ipython:: python + + series.index.names = [None, None] + series + series.query('ilevel_1 == "bar1"') + + +The ``DataFrame`` :meth:`~pandas.DataFrame.query` Method ------------------------------------------ :class:`~pandas.DataFrame` objects have a :meth:`~pandas.DataFrame.query` diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index b74f99fca21c7..139f452af3116 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -265,6 +265,9 @@ def eval( See Also -------- + Series.query : Evaluates a boolean expression to query a series. + Series.eval : Evaluate a string describing operations on + Series. DataFrame.query : Evaluates a boolean expression to query the columns of a frame. 
DataFrame.eval : Evaluate a string describing operations on diff --git a/pandas/core/series.py b/pandas/core/series.py index c9684d0985173..c35f56a4d7c8f 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1122,6 +1122,248 @@ def _set_value(self, label, value, takeable: bool = False): # ---------------------------------------------------------------------- # Unsorted + def query(self, expr, inplace=False, **kwargs): + """ + Query the data of a Series with a boolean expression. + + Parameters + ---------- + expr : str + The query string to evaluate. + + You can refer to variables + in the environment by prefixing them with an '@' character like + '@a + b'. + + You can refer to index names that contain spaces or operators by + surrounding them in backticks. This way you can also escape + names that start with a digit, or those that are a Python keyword. + Basically when it is not valid Python identifier. See notes below + for more details. + + For example, if one of your index levels is named 'a a' and you want + to sum it with the level named 'b', your query should be '`a a` + b'. + + .. versionadded:: 0.25.0 + Backtick quoting introduced. + + .. versionadded:: 1.0.0 + Expanding functionality of backtick quoting for more than only spaces. + + inplace : bool + Whether the query should modify the data in place or return + a modified copy. + **kwargs + See the documentation for :func:`eval` for complete details + on the keyword arguments accepted by :meth:`Series.query`. + + Returns + ------- + Series + Series resulting from the provided query expression. + + See Also + -------- + eval : Evaluate a string describing operations on + Series columns. + Series.eval : Evaluate a string describing operations on + Series columns. 
+ + Notes + ----- + The result of the evaluation of this expression is first passed to + :attr:`Series.loc` and if that fails because of a + multidimensional key (e.g., a Series) then the result will be passed + to :meth:`Series.__getitem__`;. + + This method uses the top-level :func:'eval' function to + evaluate the passed query. + + The :meth:`~pandas.Series.query` method uses a slightly + modified Python syntax by default. For example, the ``&`` and ``|`` + (bitwise) operators have the precedence of their boolean cousins, + :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python, + however the semantics are different. + + You can change the semantics of the expression by passing the keyword + argument ``parser='python'``. This enforces the same semantics as + evaluation in Python space. Likewise, you can pass ``engine='python'`` + to evaluate an expression using Python itself as a backend. This is not + recommended as it is inefficient compared to using ``numexpr`` as the + engine. + + The :attr:`Series.index` attribute of the + :class:`~pandas.Series` instance are placed in the query namespace + by default, which allows you to treat both the index and columns of the + frame as a column in the frame. + The identifier ``index`` is used for the frame index; you can also + use the name of the index to identify it in a query. Please note that + Python keywords may not be used as identifiers. + + For further details and examples see the ``query`` documentation in + :ref:`indexing <indexing.query>`. + + *Backtick quoted variables* + + Backtick quoted variables are parsed as literal Python code and + are converted internally to a Python valid identifier. + This can lead to the following problems. + + During parsing a number of disallowed characters inside the backtick + quoted string are replaced by strings that are allowed as a Python identifier. 
+ These characters include all operators in Python, the space character, the + question mark, the exclamation mark, the dollar sign, and the euro sign. + For other characters that fall outside the ASCII range (U+0001..U+007F) + and those that are not further specified in PEP 3131, + the query parser will raise an error. + This excludes whitespace different than the space character, + but also the hashtag (as it is used for comments) and the backtick + itself (backtick can also not be escaped). + + In a special case, quotes that make a pair around a backtick can + confuse the parser. + For example, ```it's` > `that's``` will raise an error, + as it forms a quoted string (``'s > `that'``) with a backtick inside. + + See also the Python documentation about lexical analysis + (https://docs.python.org/3/reference/lexical_analysis.html) + in combination with the source code in :mod:`pandas.core.computation.parsing`. + + Examples + -------- + >>> series = pd.Series(range(5)) + >>> series + 0 0 + 1 1 + 2 2 + 3 3 + 4 4 + dtype: int64 + >>> series.query('index > 2') + 3 3 + 4 4 + dtype: int64 + + If the index is named, we can refer to it in the query. + + >>> series.index.name = 'numbers' + >>> series.query('numbers > 2') + numbers + 3 3 + 4 4 + dtype: int64 + """ + inplace = validate_bool_kwarg(inplace, "inplace") + if not isinstance(expr, str): + msg = f"expr must be a string to be evaluated, {type(expr)} given" + raise ValueError(msg) + kwargs["level"] = kwargs.pop("level", 0) + 1 + kwargs["target"] = None + res = self.eval(expr, **kwargs) + + try: + new_data = self.loc[res] + except ValueError: + # when res is multi-dimensional loc raises, but this is sometimes a + # valid query + new_data = self[res] + + if inplace: + self._update_inplace(new_data) + else: + return new_data + + def eval(self, expr, inplace=False, **kwargs): + """ + Evaluate a string describing operations on Series columns. + + Operates on columns only, not specific rows or elements. 
This allows + `eval` to run arbitrary code, which can make you vulnerable to code + injection if you pass user input to this function. + + Parameters + ---------- + expr : str + The expression string to evaluate. + inplace : bool, default False + If the expression contains an assignment, whether to perform the + operation inplace and mutate the existing Series. Otherwise, + a new Series is returned. + **kwargs + See the documentation for :func:`eval` for complete details + on the keyword arguments accepted by + :meth:`~pandas.Series.query`. + + Returns + ------- + ndarray, scalar, or pandas object + The result of the evaluation. + + See Also + -------- + Series.query : Evaluates a boolean expression to query the columns + of a frame. + eval : Evaluate a Python expression as a string using various + backends. + + Notes + ----- + For more details see the API documentation for :func:`~eval`. + For detailed examples see :ref:`enhancing performance with eval + <enhancingperf.eval>`. + + Examples + -------- + >>> series = pd.Series(1, index=range(5)) + >>> series + 0 1 + 1 1 + 2 1 + 3 1 + 4 1 + dtype: int64 + + Assignment is allowed though by default the original DataFrame is not + modified. + + >>> op = f"1.2 {'+'} index" + >>> series.eval(op) + 0 1.2 + 1 2.2 + 2 3.2 + 3 4.2 + 4 5.2 + dtype: float64 + + Use ``inplace=True`` to modify the original DataFrame. 
+ + >>> dict1 = {"a": 1} + >>> dict2 = {"b": 2} + >>> series.eval("c = a + b", inplace=True, resolvers=[dict1, dict2]) + >>> series + 0 1 + 1 1 + 2 1 + 3 1 + 4 1 + c 3 + dtype: int64 + """ + from pandas.core.computation.eval import eval as _eval + + inplace = validate_bool_kwarg(inplace, "inplace") + resolvers = kwargs.pop("resolvers", None) + kwargs["level"] = kwargs.pop("level", 0) + 1 + if resolvers is None: + index_resolvers = self._get_index_resolvers() + column_resolvers = self._get_cleaned_column_resolvers() + resolvers = column_resolvers, index_resolvers + if "target" not in kwargs: + kwargs["target"] = self + kwargs["resolvers"] = kwargs.get("resolvers", ()) + tuple(resolvers) + + return _eval(expr, inplace=inplace, **kwargs) + @property def _is_mixed_type(self): return False diff --git a/pandas/tests/series/test_query_eval.py b/pandas/tests/series/test_query_eval.py new file mode 100644 index 0000000000000..607536146c0ec --- /dev/null +++ b/pandas/tests/series/test_query_eval.py @@ -0,0 +1,400 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import Index, MultiIndex, Series # noqa +import pandas._testing as tm + +PARSERS = "python", "pandas" +ENGINES = "python", pytest.param("numexpr", marks=td.skip_if_no_ne) + + +@pytest.fixture(params=PARSERS, ids=lambda x: x) +def parser(request): + return request.param + + +@pytest.fixture(params=ENGINES, ids=lambda x: x) +def engine(request): + return request.param + + +def skip_if_no_pandas_parser(parser): + if parser != "pandas": + pytest.skip(f"cannot evaluate with parser {repr(parser)}") + + +class TestSeriesEval: + # smaller hits python, larger hits numexpr + @pytest.mark.parametrize("n", [4, 4000]) + @pytest.mark.parametrize( + "op_str,op,rop", + [ + ("+", "__add__", "__radd__"), + ("-", "__sub__", "__rsub__"), + ("*", "__mul__", "__rmul__"), + ("/", "__truediv__", "__rtruediv__"), + ], + ) + def test_ops(self, op_str, op, rop, n): + # tst ops and reversed ops in 
evaluation + # GH7198 + series = Series(1, index=range(n)) + series.iloc[0] = 2 + m = series.mean() + + base = Series(np.tile(m, n)) # noqa + + expected = eval(f"base {op_str} series") + + # ops as strings + result = eval(f"m {op_str} series") + tm.assert_series_equal(result, expected) + + # these are commutative + if op in ["+", "*"]: + result = getattr(series, op)(m) + tm.assert_series_equal(result, expected) + + # these are not + elif op in ["-", "/"]: + result = getattr(series, rop)(m) + tm.assert_series_equal(result, expected) + + def test_series_sub_numexpr_path(self): + # GH7192: Note we need a large number of rows to ensure this + # goes through the numexpr path + series = Series(np.random.randn(25000)) + series.iloc[0:5] = np.nan + expected = 1 - np.isnan(series.iloc[0:25]) + result = (1 - np.isnan(series)).iloc[0:25] + tm.assert_series_equal(result, expected) + + def test_query_non_str(self): + # GH 11485 + series = Series({"A": [1, 2, 3]}) + + msg = "expr must be a string to be evaluated" + with pytest.raises(ValueError, match=msg): + series.query(lambda x: x.A == 1) + + with pytest.raises(ValueError, match=msg): + series.query(111) + + def test_query_empty_string(self): + # GH 13139 + series = Series({"A": [1, 2, 3]}) + + msg = "expr cannot be an empty string" + with pytest.raises(ValueError, match=msg): + series.query("") + + def test_eval_resolvers_as_list(self): + # GH 14095 + series = Series(np.random.randn(10)) + dict1 = {"a": 1} + dict2 = {"b": 2} + assert series.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"] + assert eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"] + + +class TestSeriesEvalWithSeries: + def setup_method(self, method): + self.index = Index(data=[2000, 2001, 2002], name="year") + self.series = Series(np.random.randn(3), index=self.index) + + def teardown_method(self, method): + del self.index + del self.series + + @pytest.mark.parametrize("op", ["<", "<=", ">", ">=", "==", "!="]) + def 
test_bool_expr(self, op, parser, engine): + res = self.series.eval(f"year {op} 2001", engine=engine, parser=parser) + data1 = eval(f"2000 {op} 2001", engine=engine, parser=parser) + data2 = eval(f"2001 {op} 2001", engine=engine, parser=parser) + data3 = eval(f"2002 {op} 2001", engine=engine, parser=parser) + expect = Series(data=[data1, data2, data3], index=self.index) + # names are not checked due to different results based on engine + # (python vs numexpr) + tm.assert_series_equal(res, expect, check_names=False) + + def test_and_bitwise_operator(self, parser, engine): + res = self.series.eval("(year < 2001) & (year != 2000)", engine=engine, + parser=parser) + expect = Series(data=[False, False, False], index=self.index) + # names are not checked due to different results based on engine + # (python vs numexpr) + tm.assert_series_equal(res, expect, check_names=False) + + def test_or_bitwise_operator(self, parser, engine): + res = self.series.eval("(year > 2001) | (year == 2000)", engine=engine, + parser=parser) + expect = Series(data=[True, False, True], index=self.index) + # names are not checked due to different results based on engine (python + # vs numexpr) + tm.assert_series_equal(res, expect, check_names=False) + + +class TestSeriesQueryByIndexMethods: + def setup_method(self, method): + self.series = Series(np.random.randn(10), index=list(range(10))) + self.frame = self.series.to_frame() + + def teardown_method(self, method): + del self.series + del self.frame + + # test the boolean operands + def test_bool_operands(self): + for op in ["<", "<=", ">", ">=", "==", "!="]: + test = "index " + op + " 5" + result = self.series.query(test) + expected = self.frame.query(test)[0] + # names are not checked since computation/eval.py adds name to + # evaluated Series + tm.assert_series_equal(result, expected, check_names=False) + + # test the operands that can join queries + def test_and_bitwise_operator(self): + test = "(index > 2) & (index < 8)" + result = 
self.series.query(test) + expected = self.frame.query(test)[0] + # names are not checked since computation/eval.py adds name to + # evaluated Series + tm.assert_series_equal(result, expected, check_names=False) + + def test_or_bitwise_operator(self): + test = "(index < 3) | (index > 7)" + result = self.series.query(test) + expected = self.frame.query(test)[0] + # names are not checked since computation/eval.py adds name to + # evaluated Series + tm.assert_series_equal(result, expected, check_names=False) + + +class TestSeriesQueryByMultiIndex: + def setup_method(self, method): + self.series = Series(np.random.randn(10), index=[["a"] * 5 + ["b"] * 5, list( + range(10))]) + self.frame = self.series.to_frame() + + def teardown_method(self, method): + del self.series + del self.frame + + # check against first level + def test_query_first_level(self): + test = "ilevel_0 == 'b'" + result = self.series.query(test) + expected = self.frame.query(test)[0] + # names are not checked since computation/eval.py adds name to evaluated Series + tm.assert_series_equal(result, expected, check_names=False) + + # check against not first level + def test_query_not_first_level(self): + test = "ilevel_1 > 4" + result = self.series.query(test) + expected = self.frame.query(test)[0] + # names are not checked since computation/eval.py adds name to evaluated Series + tm.assert_series_equal(result, expected, check_names=False) + + def test_both_levels(self): + for op in ["&", "|"]: + test = f"(ilevel_0 == 'b') " + op + " ((ilevel_1 % 2) == 0)" + result = self.series.query(test) + expected = self.frame.query(test)[0] + # names are not checked since computation/eval.py adds name to evaluated + # Series + tm.assert_series_equal(result, expected, check_names=False) + + +def run_test(series, test): + frame = series.to_frame() + result = series.query(test) + expected = frame.query(test)[0] + tm.assert_series_equal(result, expected, check_names=False) + + +class TestSeriesQueryByIndex: + def 
setup_method(self, method): + self.series = Series(np.random.randn(10), index=list(range(10))) + self.frame = self.series.to_frame() + + def teardown_method(self, method): + del self.series + del self.frame + + # test the boolean operands + def test_bool_operands(self): + for op in ["<", "<=", ">", ">=", "==", "!="]: + run_test(self.series, "index " + op + " 5") + run_test(self.series, "5 " + op + " index") + + # test list equality + def test_list_equality(self): + for op in ["==", "!="]: + run_test(self.series, "index " + op + " [5]") + run_test(self.series, "[5] " + op + " index") + + # test the operands that can join queries + def test_and_bitwise_operator(self): + run_test(self.series, "(index > 2) & (index < 8)") + + def test_or_bitwise_operator(self): + run_test(self.series, "(index < 3) | (index > 7)") + + # test in and not in + def test_in(self): + run_test(self.series, "'a' in index") + run_test(self.series, "['a'] in index") + + def test_not_in(self): + run_test(self.series, "'a' not in index") + run_test(self.series, "['a'] not in index") + + +class TestSeriesQueryBacktickQuoting: + def test_single_backtick(self): + series = Series(np.random.randn(10), index=Index(list(range(10)), name="B B")) + run_test(series, "1 < `B B`") + + def test_double_backtick(self): + series = Series(np.random.randn(10), index=MultiIndex.from_arrays( + [list(range(10)), list(range(10))], names=["B B", "C C"])) + run_test(series, "1 < `B B` and 4 < `C C`") + + def test_already_underscore(self): + series = Series(np.random.randn(10), index=Index(list(range(10)), name="B_B")) + run_test(series, "1 < `B_B`") + + def test_same_name_but_underscore(self): + series = Series(np.random.randn(10), index=MultiIndex.from_arrays( + [list(range(10)), list(range(10))], names=["C_C", "C C"])) + run_test(series, "1 < `C_C` and 4 < `C C`") + + def test_underscore_and_spaces(self): + series = Series(np.random.randn(10), index=Index(list(range(10)), name="B_B B")) + run_test(series, "1 < `B_B B`") 
+ + def test_special_character_dot(self): + series = Series(np.random.randn(10), index=Index(list(range(10)), name="B.B")) + run_test(series, "1 < `B.B`") + + def test_special_character_hyphen(self): + series = Series(np.random.randn(10), index=Index(list(range(10)), name="B-B")) + run_test(series, "1 < `B-B`") + + def test_start_with_digit(self): + series = Series(np.random.randn(10), index=Index(list(range(10)), name="1e1")) + run_test(series, "1 < `1e1`") + + def test_keyword(self): + series = Series(np.random.randn(10), index=Index(list(range(10)), name="def")) + run_test(series, "1 < `def`") + + def test_empty_string(self): + series = Series(np.random.randn(10), index=Index(list(range(10)), name="")) + run_test(series, "1 < ``") + + def test_spaces(self): + series = Series(np.random.randn(10), index=Index(list(range(10)), name=" ")) + run_test(series, "1 < ` `") + + def test_parenthesis(self): + series = Series(np.random.randn(10), index=Index(list(range(10)), name="(xyz)")) + run_test(series, "1 < `(xyz)`") + + def test_many_symbols(self): + series = Series(np.random.randn(10), index=Index(list(range(10)), name= + " &^ :!€$?(} > <++*'' ")) + run_test(series, "1 < ` &^ :!€$?(} > <++*'' `") + + def test_failing_character_outside_range(self): + series = Series(np.random.randn(10), index=Index(list(range(10)), name="☺")) + with pytest.raises(SyntaxError): + series.query("`☺` > 4") + + def test_failing_hashtag(self): + series = Series(np.random.randn(10), index=Index(list(range(10)), name="foo#bar" + )) + with pytest.raises(SyntaxError): + series.query("`foo#bar` > 4") + + +class TestSeriesQueryWithMultiIndex: + def setup_method(self, method): + multiIndex = MultiIndex.from_arrays([["a"] * 5 + ["b"] * 5, [str(x) for x in + range(10)]], names=["alpha", "num"]) + self.series = Series(np.random.randn(10), index=multiIndex) + + def teardown_method(self, method): + del self.series + + # test against level 0 + def test_equality(self): + for op in ["==", "!="]: + 
run_test(self.series, "alpha " + op + " 'b'") + run_test(self.series, "'b' " + op + " alpha") + + def test_list_equality(self): + for op in ["==", "!="]: + run_test(self.series, "alpha " + op + " ['b']") + run_test(self.series, "['b'] " + op + " alpha") + + def test_in_operators(self): + for op in ["in", "not in"]: + run_test(self.series, "['b'] " + op + " alpha") + run_test(self.series, "'b' " + op + " alpha") + + # test against level 1 + def test_equality_level1(self): + for op in ["==", "!="]: + run_test(self.series, "num " + op + " '3'") + run_test(self.series, "'3' " + op + " num") + + def test_list_equality_level1(self): + for op in ["==", "!="]: + run_test(self.series, "num " + op + " ['3']") + run_test(self.series, "['3'] " + op + " num") + + def test_in_operator(self): + for op in ["in", "not in"]: + run_test(self.series, "['3'] " + op + " num") + run_test(self.series, "'3' " + op + " num") + + +class TestSeriesQueryByUnamedMultiIndex: + def setup_method(self, method): + self.series = Series(np.random.randn(10), index=[["a"] * 5 + ["b"] * 5, list( + range(10))]) + + def teardown_method(self, method): + del self.series + + # check against first level + def test_query_first_level(self): + run_test(self.series, "ilevel_0 == 'b'") + run_test(self.series, "'b' == ilevel_0") + + # check against not first level + def test_query_not_first_level(self): + run_test(self.series, "ilevel_1 > 4") + run_test(self.series, "4 > ilevel_1") + + def test_both_levels(self): + for op in ["&", "|"]: + run_test(self.series, "(ilevel_0 == 'b') " + op + " ((ilevel_1 % 2) == 0)") + run_test(self.series, "((ilevel_1 % 2) == 0) " + op + " (ilevel_0 == 'b')") + + def test_levels_equality(self): + index = [np.random.randint(5, size=100), np.random.randint(5, size=100)] + series = Series(np.random.randn(100), index=index) + + # test equality + run_test(series, "ilevel_0 == ilevel_1") + run_test(series, "ilevel_1 == ilevel_0") + + # test inequality + run_test(series, "ilevel_0 != 
ilevel_1") + run_test(series, "ilevel_1 != ilevel_0")
https://api.github.com/repos/pandas-dev/pandas/pulls/33384
2020-04-08T00:46:16Z
2020-05-29T18:03:01Z
null
2020-05-29T18:03:02Z
FEATURE: pandas.Series.query()(#22347)
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 8901efad56f79..a10f2ec5efaa3 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -342,6 +342,17 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then pytest -q --doctest-modules pandas/tseries/ RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Doctests base.py' ; echo $MSG + pytest -q --doctest-modules pandas/core/base.py + RET=$(($RET + $?)) ; echo $MSG "DONE" + + MSG='Doctests construction.py' ; echo $MSG + pytest -q --doctest-modules pandas/core/construction.py + RET=$(($RET + $?)) ; echo $MSG "DONE" + + MSG='Doctests generic.py' ; echo $MSG + pytest -q --doctest-modules pandas/core/generic.py + RET=$(($RET + $?)) ; echo $MSG "DONE" fi ### DOCSTRINGS ### diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 97a7f22df3985..df86e028ff159 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -503,10 +503,7 @@ Other - Fixed bug in :func:`pandas.testing.assert_series_equal` where dtypes were checked for ``Interval`` and ``ExtensionArray`` operands when ``check_dtype`` was ``False`` (:issue:`32747`) - Bug in :meth:`Series.map` not raising on invalid ``na_action`` (:issue:`32815`) - Bug in :meth:`DataFrame.__dir__` caused a segfault when using unicode surrogates in a column name (:issue:`25509`) -- Bug in :meth:`DataFrame.plot.scatter` caused an error when plotting variable marker sizes (:issue:`32904`) - .. --------------------------------------------------------------------------- - .. 
_whatsnew_110.contributors: Contributors diff --git a/pandas/conftest.py b/pandas/conftest.py index e1088dae3925a..758939633d5c6 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -369,6 +369,9 @@ def _create_multiindex(): ) return mi + class TestSubDict(dict): + def __init__(self, *args, **kwargs): + dict.__init__(self, *args, **kwargs) def _create_mi_with_dt64tz_level(): """ @@ -439,6 +442,9 @@ def string_series(): s.name = "series" return s + # See Also: tests.multi.conftest.idx + major_axis = Index(["foo", "bar", "baz", "qux"]) + minor_axis = Index(["one", "two"]) @pytest.fixture def object_series(): @@ -744,7 +750,7 @@ def all_logical_operators(request): * & * ^ """ - return request.param + return pytestconfig.getoption("--strict-data-files") # ---------------------------------------------------------------- @@ -757,6 +763,9 @@ def strict_data_files(pytestconfig): """ return pytestconfig.getoption("--strict-data-files") + Returns + ------- + path including ``pandas/tests``. @pytest.fixture def datapath(strict_data_files): @@ -1189,6 +1198,7 @@ def ip(): return InteractiveShell() +_cython_table = pd.core.base.SelectionMixin._cython_table.items() @pytest.fixture(params=["bsr", "coo", "csc", "csr", "dia", "dok", "lil"]) def spmatrix(request): @@ -1199,6 +1209,12 @@ def spmatrix(request): return getattr(sparse, request.param + "_matrix") + Parameters + ---------- + ndframe : DataFrame or Series + func_names_and_expected : Sequence of two items + The first item is a name of a NDFrame method ('sum', 'prod') etc. + The second item is the expected return value. 
@pytest.fixture(params=list(tm.cython_table)) def cython_table_items(request): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index aedbba755227d..5d5fe38552a0f 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8022,7 +8022,6 @@ def blk_func(values): if numeric_only is None: data = self values = data.values - try: result = f(values) @@ -8036,7 +8035,6 @@ def blk_func(values): values = data.values with np.errstate(all="ignore"): result = f(values) - else: if numeric_only: data = _get_data(axis_matters=True) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index ba2fd037901a2..ebf763f601723 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -27,13 +27,8 @@ soft_convert_objects, ) from pandas.core.dtypes.common import ( - DT64NS_DTYPE, - TD64NS_DTYPE, - is_bool_dtype, - is_categorical, is_categorical_dtype, is_datetime64_dtype, - is_datetime64tz_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, @@ -2000,6 +1995,9 @@ class DatetimeLikeBlockMixin: def _holder(self): return DatetimeArray + def should_store(self, value): + return is_dtype_equal(self.dtype, value.dtype) + @property def fill_value(self): return np.datetime64("NaT", "ns") diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index a71b4a0983c63..208caf89957e7 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -1626,6 +1626,17 @@ def test_reindex_methods(self, method, expected_values): actual = df[::-1].reindex(target, method=switched_method) tm.assert_frame_equal(expected, actual) + def test_reindex_subclass(self): + # https://github.com/pandas-dev/pandas/issues/31925 + class MyDataFrame(DataFrame): + pass + + expected = DataFrame() + df = MyDataFrame() + result = df.reindex_like(expected) + + tm.assert_frame_equal(result, expected) + def test_reindex_methods_nearest_special(self): df = 
pd.DataFrame({"x": list(range(5))}) target = np.array([-0.1, 0.9, 1.1, 1.5]) diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index 2c8261a6dcc5a..6c7fbce6ce35f 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -167,11 +167,7 @@ def test_downcast(self): o = self._construct(shape=4, value=9.5) result = o.copy() - result._mgr = o._mgr.downcast() - self._compare(result, o) - def test_constructor_compound_dtypes(self): - # see gh-5191 # Compound dtypes should raise NotImplementedError. def f(dtype):
https://api.github.com/repos/pandas-dev/pandas/pulls/33383
2020-04-08T00:45:11Z
2020-04-08T00:45:33Z
null
2020-04-08T00:45:33Z
DOC: Fix capitalisation in doc/source/whatsnew - part1 (issue #32550)
diff --git a/doc/source/whatsnew/v0.4.x.rst b/doc/source/whatsnew/v0.4.x.rst index 8e41e528f5b75..0ed7bb396674e 100644 --- a/doc/source/whatsnew/v0.4.x.rst +++ b/doc/source/whatsnew/v0.4.x.rst @@ -1,7 +1,7 @@ .. _whatsnew_04x: -v.0.4.1 through v0.4.3 (September 25 - October 9, 2011) -------------------------------------------------------- +Versions 0.4.1 through 0.4.3 (September 25 - October 9, 2011) +------------------------------------------------------------- {{ header }} diff --git a/doc/source/whatsnew/v0.5.0.rst b/doc/source/whatsnew/v0.5.0.rst index 37c52ac7bb34e..7ccb141260f18 100644 --- a/doc/source/whatsnew/v0.5.0.rst +++ b/doc/source/whatsnew/v0.5.0.rst @@ -1,8 +1,8 @@ .. _whatsnew_050: -v.0.5.0 (October 24, 2011) --------------------------- +Version 0.5.0 (October 24, 2011) +-------------------------------- {{ header }} diff --git a/doc/source/whatsnew/v0.6.0.rst b/doc/source/whatsnew/v0.6.0.rst index 973ba897b3234..f984b9ad71b63 100644 --- a/doc/source/whatsnew/v0.6.0.rst +++ b/doc/source/whatsnew/v0.6.0.rst @@ -1,7 +1,7 @@ .. _whatsnew_060: -v.0.6.0 (November 25, 2011) ---------------------------- +Version 0.6.0 (November 25, 2011) +--------------------------------- {{ header }} diff --git a/scripts/validate_rst_title_capitalization.py b/scripts/validate_rst_title_capitalization.py index edc9730db58e5..907db4ab4c7ce 100755 --- a/scripts/validate_rst_title_capitalization.py +++ b/scripts/validate_rst_title_capitalization.py @@ -99,6 +99,18 @@ "BusinessHour", "BusinessDay", "DateOffset", + "January", + "February", + "March", + "April", + "May", + "June", + "July", + "August", + "September", + "October", + "November", + "December", "Float64Index", }
- [ ] modify files index.rst, v0.4.x.rst, v0.5.0.rst, v0.6.0.rst - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/33382
2020-04-07T21:21:14Z
2020-04-11T16:18:06Z
2020-04-11T16:18:05Z
2020-04-15T16:21:08Z
BUG: iloc setting columns not taking effect
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index b74399ed86fbd..20dcc7c3dce5f 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1557,7 +1557,16 @@ def _setitem_with_indexer(self, indexer, value): (blk,) = self.obj._mgr.blocks if 1 < blk.ndim: # in case of dict, keys are indices val = list(value.values()) if isinstance(value, dict) else value - take_split_path = not blk._can_hold_element(val) + + # https://github.com/pandas-dev/pandas/issues/33198 + if ( + isinstance(indexer, tuple) + and com.is_null_slice(indexer[0]) + and self.ndim == len(indexer) + ): + take_split_path = True + else: + take_split_path = not blk._can_hold_element(val) # if we have any multi-indexes that have non-trivial slices # (not null slices) then we must take the split path, xref diff --git a/pandas/tests/frame/indexing/test_iloc.py b/pandas/tests/frame/indexing/test_iloc.py new file mode 100644 index 0000000000000..a29ab65554701 --- /dev/null +++ b/pandas/tests/frame/indexing/test_iloc.py @@ -0,0 +1,16 @@ +import numpy as np + +import pandas as pd +import pandas._testing as tm + + +def test_unsure_about_the_name_and_location(): + # https://github.com/pandas-dev/pandas/issues/33198 + arr = np.random.randn(10 ** 6).reshape(500, 2000).astype(np.float64) + df = pd.DataFrame(arr) + df.iloc[:, 1000:] = df.iloc[:, 1000:].astype(np.float32) + expected = pd.Series( + {np.dtype("float32"): 1000, np.dtype("float64"): 1000}, dtype="int64" + ) + result = df.dtypes.value_counts() + tm.assert_series_equal(result, expected)
- [x] closes #33198 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33381
2020-04-07T20:47:32Z
2020-04-16T17:24:40Z
null
2021-05-03T12:28:30Z
API/TST: Call __finalize__ in more places
diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py new file mode 100644 index 0000000000000..d307eef8beb62 --- /dev/null +++ b/pandas/tests/generic/test_finalize.py @@ -0,0 +1,782 @@ +""" +An exhaustive list of pandas methods exercising NDFrame.__finalize__. +""" +import operator +import re + +import numpy as np +import pytest + +import pandas as pd + +# TODO: +# * Binary methods (mul, div, etc.) +# * Binary outputs (align, etc.) +# * top-level methods (concat, merge, get_dummies, etc.) +# * window +# * cumulative reductions + +not_implemented_mark = pytest.mark.xfail(reason="not implemented") + +mi = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=["A", "B"]) + +frame_data = ({"A": [1]},) +frame_mi_data = ({"A": [1, 2, 3, 4]}, mi) + + +# Tuple of +# - Callable: Constructor (Series, DataFrame) +# - Tuple: Constructor args +# - Callable: pass the constructed value with attrs set to this. + +_all_methods = [ + ( + pd.Series, + (np.array([0], dtype="float64")), + operator.methodcaller("view", "int64"), + ), + (pd.Series, ([0],), operator.methodcaller("take", [])), + (pd.Series, ([0],), operator.methodcaller("__getitem__", [True])), + (pd.Series, ([0],), operator.methodcaller("repeat", 2)), + pytest.param( + (pd.Series, ([0],), operator.methodcaller("reset_index")), + marks=pytest.mark.xfail, + ), + (pd.Series, ([0],), operator.methodcaller("reset_index", drop=True)), + pytest.param( + (pd.Series, ([0],), operator.methodcaller("to_frame")), marks=pytest.mark.xfail + ), + (pd.Series, (0, mi), operator.methodcaller("count", level="A")), + (pd.Series, ([0, 0],), operator.methodcaller("drop_duplicates")), + (pd.Series, ([0, 0],), operator.methodcaller("duplicated")), + (pd.Series, ([0, 0],), operator.methodcaller("round")), + (pd.Series, ([0, 0],), operator.methodcaller("rename", lambda x: x + 1)), + (pd.Series, ([0, 0],), operator.methodcaller("rename", "name")), + (pd.Series, ([0, 0],), operator.methodcaller("set_axis", 
["a", "b"])), + (pd.Series, ([0, 0],), operator.methodcaller("reindex", [1, 0])), + (pd.Series, ([0, 0],), operator.methodcaller("drop", [0])), + (pd.Series, (pd.array([0, pd.NA]),), operator.methodcaller("fillna", 0)), + (pd.Series, ([0, 0],), operator.methodcaller("replace", {0: 1})), + (pd.Series, ([0, 0],), operator.methodcaller("shift")), + (pd.Series, ([0, 0],), operator.methodcaller("isin", [0, 1])), + (pd.Series, ([0, 0],), operator.methodcaller("between", 0, 2)), + (pd.Series, ([0, 0],), operator.methodcaller("isna")), + (pd.Series, ([0, 0],), operator.methodcaller("isnull")), + (pd.Series, ([0, 0],), operator.methodcaller("notna")), + (pd.Series, ([0, 0],), operator.methodcaller("notnull")), + (pd.Series, ([1],), operator.methodcaller("add", pd.Series([1]))), + # TODO: mul, div, etc. + ( + pd.Series, + ([0], pd.period_range("2000", periods=1)), + operator.methodcaller("to_timestamp"), + ), + ( + pd.Series, + ([0], pd.date_range("2000", periods=1)), + operator.methodcaller("to_period"), + ), + pytest.param( + ( + pd.DataFrame, + frame_data, + operator.methodcaller("dot", pd.DataFrame(index=["A"])), + ), + marks=pytest.mark.xfail(reason="Implement binary finalize"), + ), + (pd.DataFrame, frame_data, operator.methodcaller("transpose")), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("__getitem__", "A")), + marks=not_implemented_mark, + ), + (pd.DataFrame, frame_data, operator.methodcaller("__getitem__", ["A"])), + (pd.DataFrame, frame_data, operator.methodcaller("__getitem__", np.array([True]))), + (pd.DataFrame, ({("A", "a"): [1]},), operator.methodcaller("__getitem__", ["A"])), + (pd.DataFrame, frame_data, operator.methodcaller("query", "A == 1")), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("eval", "A + 1")), + marks=not_implemented_mark, + ), + (pd.DataFrame, frame_data, operator.methodcaller("select_dtypes", include="int")), + (pd.DataFrame, frame_data, operator.methodcaller("assign", b=1)), + (pd.DataFrame, 
frame_data, operator.methodcaller("set_axis", ["A"])), + (pd.DataFrame, frame_data, operator.methodcaller("reindex", [0, 1])), + (pd.DataFrame, frame_data, operator.methodcaller("drop", columns=["A"])), + (pd.DataFrame, frame_data, operator.methodcaller("drop", index=[0])), + (pd.DataFrame, frame_data, operator.methodcaller("rename", columns={"A": "a"})), + (pd.DataFrame, frame_data, operator.methodcaller("rename", index=lambda x: x)), + (pd.DataFrame, frame_data, operator.methodcaller("fillna", "A")), + (pd.DataFrame, frame_data, operator.methodcaller("fillna", method="ffill")), + (pd.DataFrame, frame_data, operator.methodcaller("set_index", "A")), + (pd.DataFrame, frame_data, operator.methodcaller("reset_index")), + (pd.DataFrame, frame_data, operator.methodcaller("isna")), + (pd.DataFrame, frame_data, operator.methodcaller("isnull")), + (pd.DataFrame, frame_data, operator.methodcaller("notna")), + (pd.DataFrame, frame_data, operator.methodcaller("notnull")), + (pd.DataFrame, frame_data, operator.methodcaller("dropna")), + (pd.DataFrame, frame_data, operator.methodcaller("drop_duplicates")), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("duplicated")), + marks=not_implemented_mark, + ), + (pd.DataFrame, frame_data, operator.methodcaller("sort_values", by="A")), + (pd.DataFrame, frame_data, operator.methodcaller("sort_index")), + (pd.DataFrame, frame_data, operator.methodcaller("nlargest", 1, "A")), + (pd.DataFrame, frame_data, operator.methodcaller("nsmallest", 1, "A")), + (pd.DataFrame, frame_mi_data, operator.methodcaller("swaplevel"),), + pytest.param( + ( + pd.DataFrame, + frame_data, + operator.methodcaller("add", pd.DataFrame(*frame_data)), + ), + marks=not_implemented_mark, + ), + # TODO: div, mul, etc. 
+ pytest.param( + ( + pd.DataFrame, + frame_data, + operator.methodcaller("combine", pd.DataFrame(*frame_data), operator.add), + ), + marks=not_implemented_mark, + ), + pytest.param( + ( + pd.DataFrame, + frame_data, + operator.methodcaller("combine_first", pd.DataFrame(*frame_data)), + ), + marks=not_implemented_mark, + ), + pytest.param( + ( + pd.DataFrame, + frame_data, + operator.methodcaller("update", pd.DataFrame(*frame_data)), + ), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("pivot", columns="A")), + marks=not_implemented_mark, + ), + pytest.param( + ( + pd.DataFrame, + {"A": [1], "B": [1]}, + operator.methodcaller("pivot_table", columns="A"), + ), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("stack")), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("explode", "A")), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_mi_data, operator.methodcaller("unstack"),), + marks=not_implemented_mark, + ), + pytest.param( + ( + pd.DataFrame, + ({"A": ["a", "b", "c"], "B": [1, 3, 5], "C": [2, 4, 6]},), + operator.methodcaller("melt", id_vars=["A"], value_vars=["B"]), + ), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("diff")), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("applymap", lambda x: x)), + marks=not_implemented_mark, + ), + pytest.param( + ( + pd.DataFrame, + frame_data, + operator.methodcaller("append", pd.DataFrame({"A": [1]})), + ), + marks=not_implemented_mark, + ), + pytest.param( + ( + pd.DataFrame, + frame_data, + operator.methodcaller("append", pd.DataFrame({"B": [1]})), + ), + marks=not_implemented_mark, + ), + pytest.param( + ( + pd.DataFrame, + frame_data, + operator.methodcaller("merge", pd.DataFrame({"A": [1]})), + ), + marks=not_implemented_mark, 
+ ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("round", 2)), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("corr")), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("cov")), + marks=[ + not_implemented_mark, + pytest.mark.filterwarnings("ignore::RuntimeWarning"), + ], + ), + pytest.param( + ( + pd.DataFrame, + frame_data, + operator.methodcaller("corrwith", pd.DataFrame(*frame_data)), + ), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("count")), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_mi_data, operator.methodcaller("count", level="A")), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("nunique")), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("idxmin")), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("idxmax")), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("mode")), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("quantile")), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("quantile", q=[0.25, 0.75])), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("quantile")), + marks=not_implemented_mark, + ), + ( + pd.DataFrame, + ({"A": [1]}, [pd.Period("2000", "D")]), + operator.methodcaller("to_timestamp"), + ), + ( + pd.DataFrame, + ({"A": [1]}, [pd.Timestamp("2000")]), + operator.methodcaller("to_period", freq="D"), + ), + pytest.param( + (pd.DataFrame, frame_mi_data, operator.methodcaller("isin", [1])), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, 
frame_mi_data, operator.methodcaller("isin", pd.Series([1]))), + marks=not_implemented_mark, + ), + pytest.param( + ( + pd.DataFrame, + frame_mi_data, + operator.methodcaller("isin", pd.DataFrame({"A": [1]})), + ), + marks=not_implemented_mark, + ), + (pd.DataFrame, frame_data, operator.methodcaller("swapaxes", 0, 1)), + (pd.DataFrame, frame_mi_data, operator.methodcaller("droplevel", "A")), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("pop", "A")), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("squeeze")), + marks=not_implemented_mark, + ), + pytest.param( + (pd.Series, ([1, 2],), operator.methodcaller("squeeze")), + # marks=not_implemented_mark, + ), + (pd.Series, ([1, 2],), operator.methodcaller("rename_axis", index="a")), + (pd.DataFrame, frame_data, operator.methodcaller("rename_axis", columns="a")), + # Unary ops + (pd.DataFrame, frame_data, operator.neg), + (pd.Series, [1], operator.neg), + (pd.DataFrame, frame_data, operator.pos), + (pd.Series, [1], operator.pos), + (pd.DataFrame, frame_data, operator.inv), + (pd.Series, [1], operator.inv), + (pd.DataFrame, frame_data, abs), + pytest.param((pd.Series, [1], abs), marks=not_implemented_mark), + pytest.param((pd.DataFrame, frame_data, round), marks=not_implemented_mark), + (pd.Series, [1], round), + (pd.DataFrame, frame_data, operator.methodcaller("take", [0, 0])), + (pd.DataFrame, frame_mi_data, operator.methodcaller("xs", "a")), + (pd.Series, (1, mi), operator.methodcaller("xs", "a")), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("get", "A")), + marks=not_implemented_mark, + ), + ( + pd.DataFrame, + frame_data, + operator.methodcaller("reindex_like", pd.DataFrame({"A": [1, 2, 3]})), + ), + ( + pd.Series, + frame_data, + operator.methodcaller("reindex_like", pd.Series([0, 1, 2])), + ), + (pd.DataFrame, frame_data, operator.methodcaller("add_prefix", "_")), + (pd.DataFrame, frame_data, 
operator.methodcaller("add_suffix", "_")), + (pd.Series, (1, ["a", "b"]), operator.methodcaller("add_prefix", "_")), + (pd.Series, (1, ["a", "b"]), operator.methodcaller("add_suffix", "_")), + (pd.Series, ([3, 2],), operator.methodcaller("sort_values")), + (pd.Series, ([1] * 10,), operator.methodcaller("head")), + (pd.DataFrame, ({"A": [1] * 10},), operator.methodcaller("head")), + (pd.Series, ([1] * 10,), operator.methodcaller("tail")), + (pd.DataFrame, ({"A": [1] * 10},), operator.methodcaller("tail")), + (pd.Series, ([1, 2],), operator.methodcaller("sample", n=2, replace=True)), + (pd.DataFrame, (frame_data,), operator.methodcaller("sample", n=2, replace=True)), + (pd.Series, ([1, 2],), operator.methodcaller("astype", float)), + (pd.DataFrame, frame_data, operator.methodcaller("astype", float)), + (pd.Series, ([1, 2],), operator.methodcaller("copy")), + (pd.DataFrame, frame_data, operator.methodcaller("copy")), + (pd.Series, ([1, 2], None, object), operator.methodcaller("infer_objects")), + ( + pd.DataFrame, + ({"A": np.array([1, 2], dtype=object)},), + operator.methodcaller("infer_objects"), + ), + (pd.Series, ([1, 2],), operator.methodcaller("convert_dtypes")), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("convert_dtypes")), + marks=not_implemented_mark, + ), + (pd.Series, ([1, None, 3],), operator.methodcaller("interpolate")), + (pd.DataFrame, ({"A": [1, None, 3]},), operator.methodcaller("interpolate")), + (pd.Series, ([1, 2],), operator.methodcaller("clip", lower=1)), + (pd.DataFrame, frame_data, operator.methodcaller("clip", lower=1)), + ( + pd.Series, + (1, pd.date_range("2000", periods=4)), + operator.methodcaller("asfreq", "H"), + ), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + operator.methodcaller("asfreq", "H"), + ), + ( + pd.Series, + (1, pd.date_range("2000", periods=4)), + operator.methodcaller("at_time", "12:00"), + ), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", 
periods=4)), + operator.methodcaller("at_time", "12:00"), + ), + ( + pd.Series, + (1, pd.date_range("2000", periods=4)), + operator.methodcaller("between_time", "12:00", "13:00"), + ), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + operator.methodcaller("between_time", "12:00", "13:00"), + ), + ( + pd.Series, + (1, pd.date_range("2000", periods=4)), + operator.methodcaller("first", "3D"), + ), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + operator.methodcaller("first", "3D"), + ), + ( + pd.Series, + (1, pd.date_range("2000", periods=4)), + operator.methodcaller("last", "3D"), + ), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + operator.methodcaller("last", "3D"), + ), + (pd.Series, ([1, 2],), operator.methodcaller("rank")), + (pd.DataFrame, frame_data, operator.methodcaller("rank")), + (pd.Series, ([1, 2],), operator.methodcaller("where", np.array([True, False]))), + (pd.DataFrame, frame_data, operator.methodcaller("where", np.array([[True]]))), + (pd.Series, ([1, 2],), operator.methodcaller("mask", np.array([True, False]))), + (pd.DataFrame, frame_data, operator.methodcaller("mask", np.array([[True]]))), + (pd.Series, ([1, 2],), operator.methodcaller("slice_shift")), + (pd.DataFrame, frame_data, operator.methodcaller("slice_shift")), + (pd.Series, (1, pd.date_range("2000", periods=4)), operator.methodcaller("tshift")), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + operator.methodcaller("tshift"), + ), + (pd.Series, ([1, 2],), operator.methodcaller("truncate", before=0)), + (pd.DataFrame, frame_data, operator.methodcaller("truncate", before=0)), + ( + pd.Series, + (1, pd.date_range("2000", periods=4, tz="UTC")), + operator.methodcaller("tz_convert", "CET"), + ), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4, tz="UTC")), + operator.methodcaller("tz_convert", "CET"), + ), + ( + pd.Series, + (1, 
pd.date_range("2000", periods=4)), + operator.methodcaller("tz_localize", "CET"), + ), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + operator.methodcaller("tz_localize", "CET"), + ), + pytest.param( + (pd.Series, ([1, 2],), operator.methodcaller("describe")), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("describe")), + marks=not_implemented_mark, + ), + (pd.Series, ([1, 2],), operator.methodcaller("pct_change")), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("pct_change")), + marks=not_implemented_mark, + ), + (pd.Series, ([1],), operator.methodcaller("transform", lambda x: x - x.min())), + pytest.param( + ( + pd.DataFrame, + frame_mi_data, + operator.methodcaller("transform", lambda x: x - x.min()), + ), + marks=not_implemented_mark, + ), + (pd.Series, ([1],), operator.methodcaller("apply", lambda x: x)), + pytest.param( + (pd.DataFrame, frame_mi_data, operator.methodcaller("apply", lambda x: x)), + marks=not_implemented_mark, + ), + # Cumulative reductions + (pd.Series, ([1],), operator.methodcaller("cumsum")), + (pd.DataFrame, frame_data, operator.methodcaller("cumsum")), + # Reductions + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("any")), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("sum")), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("std")), + marks=not_implemented_mark, + ), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("mean")), + marks=not_implemented_mark, + ), +] + + +def idfn(x): + xpr = re.compile(r"'(.*)?'") + m = xpr.search(str(x)) + if m: + return m.group(1) + else: + return str(x) + + +@pytest.fixture(params=_all_methods, ids=lambda x: idfn(x[-1])) +def ndframe_method(request): + """ + An NDFrame method returning an NDFrame. 
+ """ + return request.param + + +def test_finalize_called(ndframe_method): + cls, init_args, method = ndframe_method + ndframe = cls(*init_args) + + ndframe.attrs = {"a": 1} + result = method(ndframe) + + assert result.attrs == {"a": 1} + + +# ---------------------------------------------------------------------------- +# Binary operations + + +@pytest.mark.parametrize("annotate", ["left", "right", "both"]) +@pytest.mark.parametrize( + "args", + [ + (1, pd.Series([1])), + (1, pd.DataFrame({"A": [1]})), + (pd.Series([1]), 1), + (pd.DataFrame({"A": [1]}), 1), + (pd.Series([1]), pd.Series([1])), + (pd.DataFrame({"A": [1]}), pd.DataFrame({"A": [1]})), + (pd.Series([1]), pd.DataFrame({"A": [1]})), + (pd.DataFrame({"A": [1]}), pd.Series([1])), + ], +) +def test_binops(args, annotate, all_arithmetic_functions): + # This generates 326 tests... Is that needed? + left, right = args + if annotate == "both" and isinstance(left, int) or isinstance(right, int): + return + + if isinstance(left, pd.DataFrame) or isinstance(right, pd.DataFrame): + pytest.xfail(reason="not implemented") + + if annotate in {"left", "both"} and not isinstance(left, int): + left.attrs = {"a": 1} + if annotate in {"left", "both"} and not isinstance(right, int): + right.attrs = {"a": 1} + + result = all_arithmetic_functions(left, right) + assert result.attrs == {"a": 1} + + +# ---------------------------------------------------------------------------- +# Accessors + + +@pytest.mark.parametrize( + "method", + [ + operator.methodcaller("capitalize"), + operator.methodcaller("casefold"), + pytest.param( + operator.methodcaller("cat", ["a"]), + marks=pytest.mark.xfail(reason="finalize not called."), + ), + operator.methodcaller("contains", "a"), + operator.methodcaller("count", "a"), + operator.methodcaller("encode", "utf-8"), + operator.methodcaller("endswith", "a"), + pytest.param( + operator.methodcaller("extract", r"(\w)(\d)"), + marks=pytest.mark.xfail(reason="finalize not called."), + ), + 
pytest.param( + operator.methodcaller("extract", r"(\w)(\d)"), + marks=pytest.mark.xfail(reason="finalize not called."), + ), + operator.methodcaller("find", "a"), + operator.methodcaller("findall", "a"), + operator.methodcaller("get", 0), + operator.methodcaller("index", "a"), + operator.methodcaller("len"), + operator.methodcaller("ljust", 4), + operator.methodcaller("lower"), + operator.methodcaller("lstrip"), + operator.methodcaller("match", r"\w"), + operator.methodcaller("normalize", "NFC"), + operator.methodcaller("pad", 4), + operator.methodcaller("partition", "a"), + operator.methodcaller("repeat", 2), + operator.methodcaller("replace", "a", "b"), + operator.methodcaller("rfind", "a"), + operator.methodcaller("rindex", "a"), + operator.methodcaller("rjust", 4), + operator.methodcaller("rpartition", "a"), + operator.methodcaller("rstrip"), + operator.methodcaller("slice", 4), + operator.methodcaller("slice_replace", 1, repl="a"), + operator.methodcaller("startswith", "a"), + operator.methodcaller("strip"), + operator.methodcaller("swapcase"), + operator.methodcaller("translate", {"a": "b"}), + operator.methodcaller("upper"), + operator.methodcaller("wrap", 4), + operator.methodcaller("zfill", 4), + operator.methodcaller("isalnum"), + operator.methodcaller("isalpha"), + operator.methodcaller("isdigit"), + operator.methodcaller("isspace"), + operator.methodcaller("islower"), + operator.methodcaller("isupper"), + operator.methodcaller("istitle"), + operator.methodcaller("isnumeric"), + operator.methodcaller("isdecimal"), + operator.methodcaller("get_dummies"), + ], + ids=idfn, +) +@not_implemented_mark +def test_string_method(method): + s = pd.Series(["a1"]) + s.attrs = {"a": 1} + result = method(s.str) + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "method", + [ + operator.methodcaller("to_period"), + operator.methodcaller("tz_localize", "CET"), + operator.methodcaller("normalize"), + operator.methodcaller("strftime", "%Y"), + 
operator.methodcaller("round", "H"), + operator.methodcaller("floor", "H"), + operator.methodcaller("ceil", "H"), + operator.methodcaller("month_name"), + operator.methodcaller("day_name"), + ], + ids=idfn, +) +@not_implemented_mark +def test_datetime_method(method): + s = pd.Series(pd.date_range("2000", periods=4)) + s.attrs = {"a": 1} + result = method(s.dt) + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "attr", + [ + "date", + "time", + "timetz", + "year", + "month", + "day", + "hour", + "minute", + "second", + "microsecond", + "nanosecond", + "week", + "weekofyear", + "dayofweek", + "dayofyear", + "quarter", + "is_month_start", + "is_month_end", + "is_quarter_start", + "is_quarter_end", + "is_year_start", + "is_year_end", + "is_leap_year", + "daysinmonth", + "days_in_month", + ], +) +@not_implemented_mark +def test_datetime_property(attr): + s = pd.Series(pd.date_range("2000", periods=4)) + s.attrs = {"a": 1} + result = getattr(s.dt, attr) + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "attr", ["days", "seconds", "microseconds", "nanoseconds", "components"] +) +@not_implemented_mark +def test_timedelta_property(attr): + s = pd.Series(pd.timedelta_range("2000", periods=4)) + s.attrs = {"a": 1} + result = getattr(s.dt, attr) + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "method", [operator.methodcaller("total_seconds")], +) +@not_implemented_mark +def test_timedelta_methods(method): + s = pd.Series(pd.timedelta_range("2000", periods=4)) + s.attrs = {"a": 1} + result = method(s.dt) + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "method", + [ + operator.methodcaller("add_categories", ["c"]), + operator.methodcaller("as_ordered"), + operator.methodcaller("as_unordered"), + lambda x: getattr(x, "codes"), + operator.methodcaller("remove_categories", "a"), + operator.methodcaller("remove_unused_categories"), + operator.methodcaller("rename_categories", {"a": "A", "b": "B"}), + 
operator.methodcaller("reorder_categories", ["b", "a"]), + operator.methodcaller("set_categories", ["A", "B"]), + ], +) +@not_implemented_mark +def test_categorical_accessor(method): + s = pd.Series(["a", "b"], dtype="category") + s.attrs = {"a": 1} + result = method(s.cat) + assert result.attrs == {"a": 1} + + +# ---------------------------------------------------------------------------- +# Groupby + + +@pytest.mark.parametrize( + "obj", [pd.Series([0, 0]), pd.DataFrame({"A": [0, 1], "B": [1, 2]})] +) +@pytest.mark.parametrize( + "method", + [ + operator.methodcaller("sum"), + lambda x: x.agg("sum"), + lambda x: x.agg(["sum", "count"]), + lambda x: x.transform(lambda y: y), + lambda x: x.apply(lambda y: y), + ], +) +@not_implemented_mark +def test_groupby(obj, method): + obj.attrs = {"a": 1} + result = method(obj.groupby([0, 0])) + assert result.attrs == {"a": 1}
Progress towards https://github.com/pandas-dev/pandas/issues/28283. This adds tests that ensures `NDFrame.__finalize__` is called in more places. Thus far I've added tests for anything that meets the following rule: > Pandas calls `NDFrame.__finalize__` on any NDFrame method that returns > another NDFrame. I think that given the generality of `__finalize__`, making any kind of list of which methods should call it is going to be somewhat arbitrary. That rule errs on the side of calling it too often, which I think is my preference. *For reviewers* I think the two most helpful pieces would be 1. Do you agree with the general rule of `NDFrame -> NDFrame` should call `__finalize__`? 2. A spot check on the tests I've added If you don't agree with the general rule, then we'll need to go through method by method and determine whether that method should call `__finalize__`. Once we have agreement that the tests I've added accurately reflect the desired outcome, this can be merged and we can fix `xfails` in smaller batches.
https://api.github.com/repos/pandas-dev/pandas/pulls/33379
2020-04-07T19:57:53Z
2020-04-10T17:53:08Z
2020-04-10T17:53:08Z
2022-11-27T21:55:32Z
DOC: Fix capitalization among headings in doc/source/whatsnew (#32550)
diff --git a/doc/source/whatsnew/v0.19.0.rst b/doc/source/whatsnew/v0.19.0.rst index 6eb509a258430..7390b80217b2c 100644 --- a/doc/source/whatsnew/v0.19.0.rst +++ b/doc/source/whatsnew/v0.19.0.rst @@ -377,7 +377,7 @@ For ``MultiIndex``, values are dropped if any level is missing by default. Speci .. _whatsnew_0190.gbq: -Google BigQuery Enhancements +Google BigQuery enhancements ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - The :func:`read_gbq` method has gained the ``dialect`` argument to allow users to specify whether to use BigQuery's legacy SQL or BigQuery's standard SQL. See the `docs <https://pandas-gbq.readthedocs.io/en/latest/reading.html>`__ for more details (:issue:`13615`). @@ -385,7 +385,7 @@ Google BigQuery Enhancements .. _whatsnew_0190.errstate: -Fine-grained numpy errstate +Fine-grained NumPy errstate ^^^^^^^^^^^^^^^^^^^^^^^^^^^ Previous versions of pandas would permanently silence numpy's ufunc error handling when ``pandas`` was imported. Pandas did this in order to silence the warnings that would arise from using numpy ufuncs on missing data, which are usually represented as ``NaN`` s. Unfortunately, this silenced legitimate warnings arising in non-pandas code in the application. Starting with 0.19.0, pandas will use the ``numpy.errstate`` context manager to silence these warnings in a more fine-grained manner, only around where these operations are actually used in the pandas code base. (:issue:`13109`, :issue:`13145`) @@ -1185,7 +1185,7 @@ the result of calling :func:`read_csv` without the ``chunksize=`` argument .. _whatsnew_0190.sparse: -Sparse Changes +Sparse changes ^^^^^^^^^^^^^^ These changes allow pandas to handle sparse data with more dtypes, and for work to make a smoother experience with data handling. 
diff --git a/doc/source/whatsnew/v0.20.0.rst b/doc/source/whatsnew/v0.20.0.rst index ceb1c7f27231b..06bbd9679bb4d 100644 --- a/doc/source/whatsnew/v0.20.0.rst +++ b/doc/source/whatsnew/v0.20.0.rst @@ -356,7 +356,7 @@ To convert a ``SparseDataFrame`` back to sparse SciPy matrix in COO format, you .. _whatsnew_0200.enhancements.style_excel: -Excel output for styled DataFrames +Excel output for styled dataframes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Experimental support has been added to export ``DataFrame.style`` formats to Excel using the ``openpyxl`` engine. (:issue:`15530`) @@ -813,7 +813,7 @@ New behavior: .. _whatsnew_0200.api_breaking.gbq: -Pandas Google BigQuery support has moved +pandas Google BigQuery support has moved ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ pandas has split off Google BigQuery support into a separate package ``pandas-gbq``. You can ``conda install pandas-gbq -c conda-forge`` or @@ -1289,7 +1289,7 @@ A new public ``pandas.plotting`` module has been added that holds plotting funct .. _whatsnew_0200.privacy.development: -Other Development Changes +Other development changes ^^^^^^^^^^^^^^^^^^^^^^^^^ - Building pandas for development now requires ``cython >= 0.23`` (:issue:`14831`) diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index c756bc87e9b89..45399792baecf 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -733,7 +733,7 @@ is the case with :attr:`Period.end_time`, for example .. _whatsnew_0240.api_breaking.datetime_unique: -Series.unique for Timezone-Aware Data +Series.unique for timezone-aware data ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The return type of :meth:`Series.unique` for datetime with timezone values has changed @@ -1131,7 +1131,7 @@ data is incompatible with a passed ``dtype=`` (:issue:`15832`) .. 
_whatsnew_0240.api.concat_categorical: -Concatenation Changes +Concatenation changes ^^^^^^^^^^^^^^^^^^^^^ Calling :func:`pandas.concat` on a ``Categorical`` of ints with NA values now diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index b18d022349001..44558fd63ba15 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -85,7 +85,7 @@ See :ref:`groupby.aggregate.named` for more. .. _whatsnew_0250.enhancements.multiple_lambdas: -Groupby Aggregation with multiple lambdas +Groupby aggregation with multiple lambdas ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ You can now provide multiple lambda functions to a list-like aggregation in @@ -1243,7 +1243,7 @@ Sparse - Bug in :func:`numpy.modf` on a :class:`SparseArray`. Now a tuple of :class:`SparseArray` is returned (:issue:`26946`). -Build Changes +Build changes ^^^^^^^^^^^^^ - Fix install error with PyPy on macOS (:issue:`26536`) diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 6597b764581a4..4f0ca97310d85 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -15,7 +15,7 @@ including other versions of pandas. 1.0. -New Deprecation Policy +New deprecation policy ~~~~~~~~~~~~~~~~~~~~~~ Starting with Pandas 1.0.0, pandas will adopt a variant of `SemVer`_ to @@ -61,7 +61,7 @@ the :ref:`custom window rolling documentation <stats.custom_rolling_window>` .. _whatsnew_100.to_markdown: -Converting to Markdown +Converting to markdown ^^^^^^^^^^^^^^^^^^^^^^ We've added :meth:`~DataFrame.to_markdown` for creating a markdown table (:issue:`11052`) @@ -746,7 +746,7 @@ Optional libraries below the lowest tested version may still work, but are not c See :ref:`install.dependencies` and :ref:`install.optional_dependencies` for more. 
-Build Changes +Build changes ^^^^^^^^^^^^^ Pandas has added a `pyproject.toml <https://www.python.org/dev/peps/pep-0517/>`_ file and will no longer include @@ -778,7 +778,7 @@ Other API changes .. _whatsnew_100.api.documentation: -Documentation Improvements +Documentation improvements ^^^^^^^^^^^^^^^^^^^^^^^^^^ - Added new section on :ref:`scale` (:issue:`28315`).
Updated headers for the following files: ``` - [x] doc/source/whatsnew/v0.19.0.rst - [x] doc/source/whatsnew/v0.20.0.rst - [x] doc/source/whatsnew/v0.24.0.rst - [x] doc/source/whatsnew/v0.25.0.rst - [x] doc/source/whatsnew/v1.0.0.rst ```
https://api.github.com/repos/pandas-dev/pandas/pulls/33378
2020-04-07T19:35:47Z
2020-04-10T17:14:53Z
2020-04-10T17:14:53Z
2020-04-10T19:23:39Z
BUG: is_categorical shouldnt recognize Dtype objects
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 16373bd697c1f..efb27db26345e 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -19,7 +19,7 @@ PeriodDtype, registry, ) -from pandas.core.dtypes.generic import ABCCategorical, ABCIndexClass +from pandas.core.dtypes.generic import ABCCategorical, ABCIndexClass, ABCSeries from pandas.core.dtypes.inference import ( # noqa:F401 is_array_like, is_bool, @@ -68,9 +68,6 @@ ensure_float64 = algos.ensure_float64 ensure_float32 = algos.ensure_float32 -_ensure_datetime64ns = conversion.ensure_datetime64ns -_ensure_timedelta64ns = conversion.ensure_timedelta64ns - def ensure_float(arr): """ @@ -359,8 +356,12 @@ def is_categorical(arr) -> bool: True >>> is_categorical(pd.CategoricalIndex([1, 2, 3])) True + >>> is_categorical(cat.dtype) + False """ - return isinstance(arr, ABCCategorical) or is_categorical_dtype(arr) + return isinstance(arr, ABCCategorical) or ( + isinstance(arr, (ABCIndexClass, ABCSeries)) and is_categorical_dtype(arr.dtype) + ) def is_datetime64_dtype(arr_or_dtype) -> bool: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index d0319e9181bad..9f147a6e6c8fd 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4716,7 +4716,7 @@ def groupby(self, values) -> PrettyDict[Hashable, np.ndarray]: # TODO: if we are a MultiIndex, we can do better # that converting to tuples if isinstance(values, ABCMultiIndex): - values = values.values + values = values._values values = ensure_categorical(values) result = values._reverse_indexer() diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index 66bf696cbe912..7dc8984f6d9b7 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -208,6 +208,7 @@ def test_is_categorical(): assert com.is_categorical(pd.CategoricalIndex([1, 2, 3])) assert not com.is_categorical([1, 2, 3]) + assert not 
com.is_categorical(cat.dtype) # Categorical obj, not dtype def test_is_datetime64_dtype(): diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index d0831ea514a64..d9b0473d6c3a3 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -159,7 +159,7 @@ def test_basic(self, dtype): assert is_categorical_dtype(s) assert not is_categorical_dtype(np.dtype("float64")) - assert is_categorical(s.dtype) + assert not is_categorical(s.dtype) assert is_categorical(s) assert not is_categorical(np.dtype("float64")) assert not is_categorical(1.0)
is_categorical(obj) is used in a couple places where we then go on to access `obj.dtype`, so this should not return `True` if we have a `CategoricalDtype` object.
https://api.github.com/repos/pandas-dev/pandas/pulls/33377
2020-04-07T19:17:35Z
2020-04-08T01:11:59Z
null
2020-04-08T01:12:08Z
DOC: Fix capitalization among headings in doc/source/whatsnew (#32550)
diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index 50333b54ca903..b5ac96752536e 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -3,7 +3,7 @@ {{ header }} ************* -Release Notes +Release notes ************* This is the list of changes to pandas between each release. For full details, diff --git a/doc/source/whatsnew/v0.14.0.rst b/doc/source/whatsnew/v0.14.0.rst index 25a75492d78fb..0041f6f03afef 100644 --- a/doc/source/whatsnew/v0.14.0.rst +++ b/doc/source/whatsnew/v0.14.0.rst @@ -473,7 +473,7 @@ Some other enhancements to the sql functions include: .. _whatsnew_0140.slicers: -MultiIndexing using slicers +Multiindexing using slicers ~~~~~~~~~~~~~~~~~~~~~~~~~~~ In 0.14.0 we added a new way to slice MultiIndexed objects. @@ -904,7 +904,7 @@ There are no experimental changes in 0.14.0 .. _whatsnew_0140.bug_fixes: -Bug Fixes +Bug fixes ~~~~~~~~~ - Bug in Series ValueError when index doesn't match data (:issue:`6532`) diff --git a/doc/source/whatsnew/v0.15.0.rst b/doc/source/whatsnew/v0.15.0.rst index 95e354e425143..fc190908bdc07 100644 --- a/doc/source/whatsnew/v0.15.0.rst +++ b/doc/source/whatsnew/v0.15.0.rst @@ -600,7 +600,7 @@ Rolling/expanding moments improvements .. _whatsnew_0150.sql: -Improvements in the sql io module +Improvements in the SQL io module ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - Added support for a ``chunksize`` parameter to ``to_sql`` function. This allows DataFrame to be written in chunks and avoid packet-size overflow errors (:issue:`8062`). diff --git a/doc/source/whatsnew/v0.18.0.rst b/doc/source/whatsnew/v0.18.0.rst index d3f96d4185d65..e371f1d9fe69a 100644 --- a/doc/source/whatsnew/v0.18.0.rst +++ b/doc/source/whatsnew/v0.18.0.rst @@ -1197,7 +1197,7 @@ Performance improvements .. _whatsnew_0180.bug_fixes: -Bug Fixes +Bug fixes ~~~~~~~~~ - Bug in ``GroupBy.size`` when data-frame is empty. 
(:issue:`11699`) diff --git a/doc/source/whatsnew/v0.18.1.rst b/doc/source/whatsnew/v0.18.1.rst index f786ce513f6fe..2c6e8f0e27154 100644 --- a/doc/source/whatsnew/v0.18.1.rst +++ b/doc/source/whatsnew/v0.18.1.rst @@ -380,7 +380,7 @@ New behavior: .. _whatsnew_0181.numpy_compatibility: -numpy function compatibility +NumPy function compatibility ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Compatibility between pandas array-like methods (e.g. ``sum`` and ``take``) and their ``numpy``
Updated headers for the following files: ``` - [x] doc/source/whatsnew/index.rst - [x] doc/source/whatsnew/v0.14.0.rst - [x] doc/source/whatsnew/v0.15.0.rst - [x] doc/source/whatsnew/v0.18.0.rst - [x] doc/source/whatsnew/v0.18.1.rst ```
https://api.github.com/repos/pandas-dev/pandas/pulls/33376
2020-04-07T19:15:37Z
2020-04-10T17:15:26Z
2020-04-10T17:15:26Z
2020-04-10T19:23:18Z
CLN: json_table_schema remove unused arg, annotate
diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py index 4e42533ca2744..6061af72901a5 100644 --- a/pandas/io/json/_table_schema.py +++ b/pandas/io/json/_table_schema.py @@ -6,6 +6,7 @@ import warnings import pandas._libs.json as json +from pandas._typing import DtypeObj from pandas.core.dtypes.common import ( is_bool_dtype, @@ -26,17 +27,17 @@ loads = json.loads -def as_json_table_type(x): +def as_json_table_type(x: DtypeObj) -> str: """ Convert a NumPy / pandas type to its corresponding json_table. Parameters ---------- - x : array or dtype + x : np.dtype or ExtensionDtype Returns ------- - t : str + str the Table Schema data types Notes @@ -96,8 +97,8 @@ def set_default_names(data): return data -def convert_pandas_type_to_json_field(arr, dtype=None): - dtype = dtype or arr.dtype +def convert_pandas_type_to_json_field(arr): + dtype = arr.dtype if arr.name is None: name = "values" else: diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index c0d40048a72fe..31905b223b91d 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -103,19 +103,23 @@ class TestTableSchemaType: @pytest.mark.parametrize("int_type", [np.int, np.int16, np.int32, np.int64]) def test_as_json_table_type_int_data(self, int_type): int_data = [1, 2, 3] - assert as_json_table_type(np.array(int_data, dtype=int_type)) == "integer" + assert as_json_table_type(np.array(int_data, dtype=int_type).dtype) == "integer" @pytest.mark.parametrize( "float_type", [np.float, np.float16, np.float32, np.float64] ) def test_as_json_table_type_float_data(self, float_type): float_data = [1.0, 2.0, 3.0] - assert as_json_table_type(np.array(float_data, dtype=float_type)) == "number" + assert ( + as_json_table_type(np.array(float_data, dtype=float_type).dtype) == "number" + ) @pytest.mark.parametrize("bool_type", [bool, np.bool]) def test_as_json_table_type_bool_data(self, 
bool_type): bool_data = [True, False] - assert as_json_table_type(np.array(bool_data, dtype=bool_type)) == "boolean" + assert ( + as_json_table_type(np.array(bool_data, dtype=bool_type).dtype) == "boolean" + ) @pytest.mark.parametrize( "date_data", @@ -128,11 +132,11 @@ def test_as_json_table_type_bool_data(self, bool_type): ], ) def test_as_json_table_type_date_data(self, date_data): - assert as_json_table_type(date_data) == "datetime" + assert as_json_table_type(date_data.dtype) == "datetime" @pytest.mark.parametrize("str_data", [pd.Series(["a", "b"]), pd.Index(["a", "b"])]) def test_as_json_table_type_string_data(self, str_data): - assert as_json_table_type(str_data) == "string" + assert as_json_table_type(str_data.dtype) == "string" @pytest.mark.parametrize( "cat_data", @@ -145,7 +149,7 @@ def test_as_json_table_type_string_data(self, str_data): ], ) def test_as_json_table_type_categorical_data(self, cat_data): - assert as_json_table_type(cat_data) == "any" + assert as_json_table_type(cat_data.dtype) == "any" # ------ # dtypes @@ -189,7 +193,7 @@ def test_as_json_table_type_categorical_dtypes(self): # TODO: I think before is_categorical_dtype(Categorical) # returned True, but now it's False. Figure out why or # if it matters - assert as_json_table_type(pd.Categorical(["a"])) == "any" + assert as_json_table_type(pd.Categorical(["a"]).dtype) == "any" assert as_json_table_type(CategoricalDtype()) == "any"
cc @WillAyd
https://api.github.com/repos/pandas-dev/pandas/pulls/33375
2020-04-07T19:07:48Z
2020-04-07T22:50:45Z
2020-04-07T22:50:45Z
2020-04-07T23:01:32Z
DOC/TYP: Fixed typing and doc
diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py index f6380808d5ac2..d947c4cf2abfa 100644 --- a/pandas/core/aggregation.py +++ b/pandas/core/aggregation.py @@ -5,13 +5,18 @@ from collections import defaultdict from functools import partial -from typing import Any, DefaultDict, List, Sequence, Tuple +from typing import TYPE_CHECKING, Any, DefaultDict, Dict, List, Sequence, Tuple + +from pandas._typing import Scalar from pandas.core.dtypes.common import is_dict_like, is_list_like import pandas.core.common as com from pandas.core.indexes.api import Index +if TYPE_CHECKING: + import numpy as np # noqa: F401 + def is_multi_agg_with_relabel(**kwargs) -> bool: """ @@ -39,7 +44,9 @@ def is_multi_agg_with_relabel(**kwargs) -> bool: ) -def normalize_keyword_aggregation(kwargs: dict) -> Tuple[dict, List[str], List[int]]: +def normalize_keyword_aggregation( + kwargs: Dict[str, str] +) -> Tuple[DefaultDict[str, List[Scalar]], Tuple[str, ...], "np.ndarray"]: """ Normalize user-provided "named aggregation" kwargs. Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs @@ -51,11 +58,11 @@ def normalize_keyword_aggregation(kwargs: dict) -> Tuple[dict, List[str], List[i Returns ------- - aggspec : dict + aggspec : collections.defaultdict of lists The transformed kwargs. - columns : List[str] + columns : tuple The user-provided keys. - col_idx_order : List[int] + col_idx_order : numpy.ndarray List of columns indices. 
Examples diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 208cbfc5b06d6..d2d92e98e7f0a 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -14,11 +14,13 @@ TYPE_CHECKING, Any, Callable, + DefaultDict, Dict, FrozenSet, Iterable, List, Mapping, + Optional, Sequence, Tuple, Type, @@ -30,7 +32,7 @@ import numpy as np from pandas._libs import Timestamp, lib -from pandas._typing import FrameOrSeries +from pandas._typing import FrameOrSeries, Scalar from pandas.util._decorators import Appender, Substitution, doc from pandas.core.dtypes.cast import ( @@ -909,7 +911,9 @@ class DataFrameGroupBy(GroupBy[DataFrame]): axis="", ) @Appender(_shared_docs["aggregate"]) - def aggregate(self, func=None, *args, **kwargs): + def aggregate( + self, func: Optional[DefaultDict[str, List[Scalar]]] = None, *args, **kwargs + ): relabeling = func is None and is_multi_agg_with_relabel(**kwargs) if relabeling:
- [x] xref https://github.com/pandas-dev/pandas/pull/33263#pullrequestreview-389118991 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33374
2020-04-07T16:59:55Z
2020-05-22T10:42:33Z
null
2021-05-03T12:28:53Z
BUG: #31464 Fix error when parsing JSON list of bool into Series
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 6f2b9b4f946c7..840571c21da50 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -429,6 +429,7 @@ I/O - Bug in :meth:`read_sas` was raising an ``AttributeError`` when reading files from Google Cloud Storage (issue:`33069`) - Bug in :meth:`DataFrame.to_sql` where an ``AttributeError`` was raised when saving an out of bounds date (:issue:`26761`) - Bug in :meth:`read_excel` did not correctly handle multiple embedded spaces in OpenDocument text cells. (:issue:`32207`) +- Bug in :meth:`read_json` was raising ``TypeError`` when reading a list of booleans into a Series. (:issue:`31464`) Plotting ^^^^^^^^ diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 886387a7a9fe6..20724a498b397 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -982,7 +982,7 @@ def _try_convert_to_date(self, data): for date_unit in date_units: try: new_data = to_datetime(new_data, errors="raise", unit=date_unit) - except (ValueError, OverflowError): + except (ValueError, OverflowError, TypeError): continue return new_data, True return data, False diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index b74abc965f7fa..0576d8e91d531 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -1659,3 +1659,9 @@ def test_json_pandas_nulls(self, nulls_fixture): # GH 31615 result = pd.DataFrame([[nulls_fixture]]).to_json() assert result == '{"0":{"0":null}}' + + def test_readjson_bool_series(self): + # GH31464 + result = read_json("[true, true, false]", typ="series") + expected = pd.Series([True, True, False]) + tm.assert_series_equal(result, expected)
Add a missing exception type to the except clause, to cover the TypeError that is thrown by Cythonized array_to_datetime function when trying to convert bool to nonseconds. - [x] closes #31464 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33373
2020-04-07T16:56:39Z
2020-04-08T17:13:08Z
2020-04-08T17:13:08Z
2020-04-08T19:04:29Z
ENH: Add query functionality to skiprows param in read_csv
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index f74182f6a59c0..af476cb94beaa 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -88,6 +88,7 @@ Other enhancements - :class:`Series.str` now has a `fullmatch` method that matches a regular expression against the entire string in each row of the series, similar to `re.fullmatch` (:issue:`32806`). - :meth:`DataFrame.sample` will now also allow array-like and BitGenerator objects to be passed to ``random_state`` as seeds (:issue:`32503`) - :meth:`MultiIndex.union` will now raise `RuntimeWarning` if the object inside are unsortable, pass `sort=False` to suppress this warning (:issue:`33015`) +- :func:`read_csv` new parameter conditionalrows now accepts strings to filter out rows that do not pass query condition - .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index e4aeb7ad69792..23e1aba3c0953 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -330,6 +330,7 @@ cdef class TextReader: bint allow_leading_cols=True, bint low_memory=False, skiprows=None, + conditionalrows=None, skipfooter=0, bint verbose=False, bint mangle_dupe_cols=True, diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 2df81ba0aa51a..65f8421124d4f 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -179,13 +179,22 @@ Values to consider as False. skipinitialspace : bool, default False Skip spaces after delimiter. -skiprows : list-like, int or callable, optional +skiprows : list-like, int, str or callable, optional Line numbers to skip (0-indexed) or number of lines to skip (int) at the start of the file. + If str, the query passed in will be run on the data, keeping only the + rows that pass the query. + An example of a valid query argument would be ``(population > 1200)``. 
+ If callable, the callable function will be evaluated against the row indices, returning True if the row should be skipped and False otherwise. An example of a valid callable argument would be ``lambda x: x in [0, 2]``. +conditionalrows : str, optional + Query to run on the data to keep rows based on a boolean condition. + The query passed in will be run on the data, keeping only the + rows that pass the query. + An example of a valid query argument would be ``(population > 1200)``. skipfooter : int, default 0 Number of lines at bottom of file to skip (Unsupported with engine='c'). nrows : int, optional @@ -478,6 +487,7 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds): "names": None, "prefix": None, "skiprows": None, + "conditionalrows": None, "skipfooter": 0, "nrows": None, "na_values": None, @@ -548,6 +558,7 @@ def parser_f( false_values=None, skipinitialspace=False, skiprows=None, + conditionalrows=None, skipfooter=0, nrows=None, # NA and Missing Data Handling @@ -639,6 +650,7 @@ def parser_f( names=names, prefix=prefix, skiprows=skiprows, + conditionalrows=conditionalrows, skipfooter=skipfooter, na_values=na_values, true_values=true_values, @@ -672,8 +684,13 @@ def parser_f( infer_datetime_format=infer_datetime_format, skip_blank_lines=skip_blank_lines, ) + df = _read(filepath_or_buffer, kwds) + + if isinstance(conditionalrows, str): + df.query(conditionalrows, inplace=True) + df.reset_index(drop=True, inplace=True) - return _read(filepath_or_buffer, kwds) + return df parser_f.__name__ = name @@ -1036,6 +1053,7 @@ def _clean_options(self, options, engine): converters = options["converters"] na_values = options["na_values"] skiprows = options["skiprows"] + conditionalrows = options["conditionalrows"] validate_header_arg(options["header"]) @@ -1097,6 +1115,7 @@ def _clean_options(self, options, engine): result["na_values"] = na_values result["na_fvalues"] = na_fvalues result["skiprows"] = skiprows + result["conditionalrows"] = conditionalrows return 
result, engine @@ -1144,6 +1163,12 @@ def read(self, nrows=None): df = DataFrame(col_dict, columns=columns, index=index) + kwds = self.options + + if isinstance(kwds.get("conditionalrows"), str): + df.query(kwds.get("conditionalrows"), inplace=True) + df.reset_index(drop=True, inplace=True) + self._currow += new_rows if self.squeeze and len(df.columns) == 1: @@ -2269,7 +2294,7 @@ def __init__(self, f, **kwds): if callable(self.skiprows): self.skipfunc = self.skiprows - else: + elif not isinstance(self.skiprows, str): self.skipfunc = lambda x: x in self.skiprows self.skipfooter = _validate_skipfooter_arg(kwds["skipfooter"]) diff --git a/pandas/tests/io/parser/test_conditionalrows.py b/pandas/tests/io/parser/test_conditionalrows.py new file mode 100644 index 0000000000000..67031945d4f35 --- /dev/null +++ b/pandas/tests/io/parser/test_conditionalrows.py @@ -0,0 +1,107 @@ +""" +Tests that skipped rows are properly handled during +parsing for all of the parsers defined in parsers.py +""" + +from datetime import datetime +from io import StringIO + +import numpy as np +import pytest + +from pandas.errors import EmptyDataError + +from pandas import DataFrame, Index +import pandas._testing as tm + + +def test_conditional_rows_single_column_less_than(all_parsers): + # see gh-32072 + parser = all_parsers + data = """country,capital,area,population +Brazil,Brasilia,8.516,200.4 +Russia,Moscow,17.10,143.5 +India,New Delhi,3.286,1252 +China,Beijing,9.597,1357 +South Africa,Pretoria,1.221,52.98 +""" + df = parser.read_csv(StringIO(data), conditionalrows="(area > 8.516)") + expected = DataFrame( + data={ + "country": ["Russia", "China"], + "capital": ["Moscow", "Beijing"], + "area": [17.10, 9.597], + "population": [143.5, 1357], + } + ) + tm.assert_frame_equal(df, expected) + + +def test_conditional_rows_single_column_greater_than(all_parsers): + # see gh-32072 + parser = all_parsers + data = """country,capital,area,population +Brazil,Brasilia, 8.516, 200.4 
+Russia,Moscow,17.10,143.5 +India,New Delhi,3.286,1252 +China,Beijing,9.597,1357 +South Africa,Pretoria,1.221,52.98 +""" + df = parser.read_csv(StringIO(data), conditionalrows="(area < 8.516)") + expected = DataFrame( + data={ + "country": ["India", "South Africa"], + "capital": ["New Delhi", "Pretoria"], + "area": [3.286, 1.221], + "population": [1252, 52.98], + } + ) + tm.assert_frame_equal(df, expected) + + +def test_conditional_rows_multi_columns_and(all_parsers): + # see gh-32072 + parser = all_parsers + data = """country,capital,area,population +Brazil,Brasilia, 8.516, 200.4 +Russia,Moscow,17.10,143.5 +India,New Delhi,3.286,1252 +China,Beijing,9.597,1357 +South Africa,Pretoria,1.221,52.98 +""" + df = parser.read_csv( + StringIO(data), conditionalrows="(area <= 8.516 and population > 1200)" + ) + expected = DataFrame( + data={ + "country": ["India"], + "capital": ["New Delhi"], + "area": [3.286], + "population": [1252.0], + } + ) + tm.assert_frame_equal(df, expected) + + +def test_conditional_rows_multi_columns_or(all_parsers): + # see gh-32072 + parser = all_parsers + data = """country,capital,area,population +Brazil,Brasilia, 8.516, 200.4 +Russia,Moscow,17.10,143.5 +India,New Delhi,3.286,1252 +China,Beijing,9.597,1357 +South Africa,Pretoria,1.221,52.98 +""" + df = parser.read_csv( + StringIO(data), conditionalrows="(area > 8.516 or area < 3.286)" + ) + expected = DataFrame( + data={ + "country": ["Russia", "China", "South Africa"], + "capital": ["Moscow", "Beijing", "Pretoria"], + "area": [17.10, 9.597, 1.221], + "population": [143.5, 1357.00, 52.98], + } + ) + tm.assert_frame_equal(df, expected)
This allows users to pass in strings to query through the ``skiprows`` parameter in ``pandas.read_csv``. This is done through querying the Dataframe as soon as it is created, and returning a Dataframe with the desired columns to the user. Included in this PR are the relevant whatsnew entry, documentation changes, and new tests. - [X] closes #32072 - [x] tests added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33371
2020-04-07T15:37:36Z
2020-06-01T01:55:28Z
null
2020-06-01T01:55:28Z
Unpinned openpyxl from env files
diff --git a/environment.yml b/environment.yml index cf579738f6fe9..c2c0a78d3ab6b 100644 --- a/environment.yml +++ b/environment.yml @@ -86,7 +86,7 @@ dependencies: - lxml # pd.read_excel, DataFrame.to_excel, pd.ExcelWriter, pd.ExcelFile - - openpyxl<=3.0.1 + - openpyxl - xlrd - xlsxwriter - xlwt diff --git a/requirements-dev.txt b/requirements-dev.txt index 6a2cc7b53615e..f2f84cb49a691 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -55,7 +55,7 @@ numba>=0.46.0 beautifulsoup4>=4.6.0 html5lib lxml -openpyxl<=3.0.1 +openpyxl xlrd xlsxwriter xlwt
follow up to #31525 - looks like these were overlooked
https://api.github.com/repos/pandas-dev/pandas/pulls/33367
2020-04-07T14:48:09Z
2020-04-08T17:23:41Z
2020-04-08T17:23:41Z
2023-04-12T20:17:02Z
CI: Not deleting docs symlink in prod
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1b9d28b4f2d69..db1fc30111a2d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -134,7 +134,7 @@ jobs: if: github.event_name == 'push' - name: Upload web - run: rsync -az --delete --exclude='pandas-docs' --exclude='Pandas_Cheat_Sheet*' web/build/ docs@${{ secrets.server_ip }}:/usr/share/nginx/pandas + run: rsync -az --delete --exclude='pandas-docs' --exclude='docs' --exclude='Pandas_Cheat_Sheet*' web/build/ docs@${{ secrets.server_ip }}:/usr/share/nginx/pandas if: github.event_name == 'push' - name: Upload dev docs
In #33341 I forgot to exclude the `docs` symlink for deletion, meaning that it'll be deleted when a PR is merged. And the docs will be unavailable. Fixing it here. @pandas-dev/pandas-core can we prioritize this please.
https://api.github.com/repos/pandas-dev/pandas/pulls/33366
2020-04-07T14:25:00Z
2020-04-07T14:43:00Z
2020-04-07T14:43:00Z
2020-04-07T14:43:13Z
PERF: statically define classes for is_dtype checks
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 6f2b9b4f946c7..c92fd0de63e12 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -289,6 +289,7 @@ Performance improvements sparse values from ``scipy.sparse`` matrices using the :meth:`DataFrame.sparse.from_spmatrix` constructor (:issue:`32821`, :issue:`32825`, :issue:`32826`, :issue:`32856`, :issue:`32858`). +- Performance improvement in dtype checking functions in ``pandas.api.types`` (:issue:`33364`) - Performance improvement in reductions (sum, min, max) for nullable (integer and boolean) dtypes (:issue:`30982`, :issue:`33261`). diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 16373bd697c1f..599d7c52ecb6e 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -2,7 +2,7 @@ Common type operations. """ -from typing import Any, Callable, Union +from typing import Any, Union import warnings import numpy as np @@ -199,20 +199,28 @@ def ensure_python_int(value: Union[int, np.integer]) -> int: return new_value -def classes(*klasses) -> Callable: - """ evaluate if the tipo is a subclass of the klasses """ - return lambda tipo: issubclass(tipo, klasses) - - -def classes_and_not_datetimelike(*klasses) -> Callable: - """ - evaluate if the tipo is a subclass of the klasses - and not a datetimelike - """ - return lambda tipo: ( - issubclass(tipo, klasses) - and not issubclass(tipo, (np.datetime64, np.timedelta64)) - ) +_object_classes = lambda tipo: issubclass(tipo, np.object_) +_datetime64_classes = lambda tipo: issubclass(tipo, np.datetime64) +_timedelta64_classes = lambda tipo: issubclass(tipo, np.timedelta64) +_any_int_classes = lambda tipo: issubclass(tipo, (np.integer, np.timedelta64)) +_int_classes = lambda tipo: issubclass(tipo, np.integer) and not issubclass( + tipo, (np.datetime64, np.timedelta64) +) +_signed_int_classes = lambda tipo: issubclass( + tipo, np.signedinteger +) and not 
issubclass(tipo, (np.datetime64, np.timedelta64)) +_unsigned_int_classes = lambda tipo: issubclass( + tipo, np.unsignedinteger +) and not issubclass(tipo, (np.datetime64, np.timedelta64)) +_int64_classes = lambda tipo: issubclass(tipo, np.int64) +_datetime64_or_timedelta64_classes = lambda tipo: issubclass( + tipo, (np.datetime64, np.timedelta64) +) +_numeric_classes = lambda tipo: issubclass( + tipo, (np.number, np.bool_) +) and not issubclass(tipo, (np.datetime64, np.timedelta64)) +_float_classes = lambda tipo: issubclass(tipo, np.floating) +_complex_classes = lambda tipo: issubclass(tipo, np.complexfloating) def is_object_dtype(arr_or_dtype) -> bool: @@ -242,7 +250,7 @@ def is_object_dtype(arr_or_dtype) -> bool: >>> is_object_dtype([1, 2, 3]) False """ - return _is_dtype_type(arr_or_dtype, classes(np.object_)) + return _is_dtype_type(arr_or_dtype, _object_classes) def is_sparse(arr) -> bool: @@ -390,7 +398,7 @@ def is_datetime64_dtype(arr_or_dtype) -> bool: >>> is_datetime64_dtype([1, 2, 3]) False """ - return _is_dtype_type(arr_or_dtype, classes(np.datetime64)) + return _is_dtype_type(arr_or_dtype, _datetime64_classes) def is_datetime64tz_dtype(arr_or_dtype) -> bool: @@ -457,7 +465,7 @@ def is_timedelta64_dtype(arr_or_dtype) -> bool: >>> is_timedelta64_dtype('0 days') False """ - return _is_dtype_type(arr_or_dtype, classes(np.timedelta64)) + return _is_dtype_type(arr_or_dtype, _timedelta64_classes) def is_period_dtype(arr_or_dtype) -> bool: @@ -687,7 +695,7 @@ def is_any_int_dtype(arr_or_dtype) -> bool: >>> is_any_int_dtype(pd.Index([1, 2.])) # float False """ - return _is_dtype_type(arr_or_dtype, classes(np.integer, np.timedelta64)) + return _is_dtype_type(arr_or_dtype, _any_int_classes) def is_integer_dtype(arr_or_dtype) -> bool: @@ -741,7 +749,7 @@ def is_integer_dtype(arr_or_dtype) -> bool: >>> is_integer_dtype(pd.Index([1, 2.])) # float False """ - return _is_dtype_type(arr_or_dtype, classes_and_not_datetimelike(np.integer)) + return 
_is_dtype_type(arr_or_dtype, _int_classes) def is_signed_integer_dtype(arr_or_dtype) -> bool: @@ -797,7 +805,7 @@ def is_signed_integer_dtype(arr_or_dtype) -> bool: >>> is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned False """ - return _is_dtype_type(arr_or_dtype, classes_and_not_datetimelike(np.signedinteger)) + return _is_dtype_type(arr_or_dtype, _signed_int_classes) def is_unsigned_integer_dtype(arr_or_dtype) -> bool: @@ -844,9 +852,7 @@ def is_unsigned_integer_dtype(arr_or_dtype) -> bool: >>> is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32)) True """ - return _is_dtype_type( - arr_or_dtype, classes_and_not_datetimelike(np.unsignedinteger) - ) + return _is_dtype_type(arr_or_dtype, _unsigned_int_classes) def is_int64_dtype(arr_or_dtype) -> bool: @@ -896,7 +902,7 @@ def is_int64_dtype(arr_or_dtype) -> bool: >>> is_int64_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned False """ - return _is_dtype_type(arr_or_dtype, classes(np.int64)) + return _is_dtype_type(arr_or_dtype, _int64_classes) def is_datetime64_any_dtype(arr_or_dtype) -> bool: @@ -1050,7 +1056,7 @@ def is_datetime_or_timedelta_dtype(arr_or_dtype) -> bool: >>> is_datetime_or_timedelta_dtype(np.array([], dtype=np.datetime64)) True """ - return _is_dtype_type(arr_or_dtype, classes(np.datetime64, np.timedelta64)) + return _is_dtype_type(arr_or_dtype, _datetime64_or_timedelta64_classes) def _is_unorderable_exception(e: TypeError) -> bool: @@ -1267,9 +1273,7 @@ def is_numeric_dtype(arr_or_dtype) -> bool: >>> is_numeric_dtype(np.array([], dtype=np.timedelta64)) False """ - return _is_dtype_type( - arr_or_dtype, classes_and_not_datetimelike(np.number, np.bool_) - ) + return _is_dtype_type(arr_or_dtype, _numeric_classes) def is_string_like_dtype(arr_or_dtype) -> bool: @@ -1334,7 +1338,7 @@ def is_float_dtype(arr_or_dtype) -> bool: >>> is_float_dtype(pd.Index([1, 2.])) True """ - return _is_dtype_type(arr_or_dtype, classes(np.floating)) + return _is_dtype_type(arr_or_dtype, 
_float_classes) def is_bool_dtype(arr_or_dtype) -> bool: @@ -1545,7 +1549,7 @@ def is_complex_dtype(arr_or_dtype) -> bool: >>> is_complex_dtype(np.array([1 + 1j, 5])) True """ - return _is_dtype_type(arr_or_dtype, classes(np.complexfloating)) + return _is_dtype_type(arr_or_dtype, _complex_classes) def _is_dtype(arr_or_dtype, condition) -> bool:
This moves the definition of the functions passed to `_is_dtype_type` from dynamically generated functions is `is_integer_dtype` to top-level functions. I can't run asv right now (https://github.com/pandas-dev/pandas/issues/33315#issuecomment-610329344) but here are some timeits master ```python In [2]: t = np.dtype('int64') In [3]: %timeit pd.api.types.is_integer_dtype(t) 2.22 µs ± 51.6 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each) ``` This PR ```python In [3]: %timeit pd.api.types.is_integer_dtype(t) 1.54 µs ± 39.6 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/33364
2020-04-07T11:28:13Z
2020-07-17T11:18:57Z
null
2023-05-11T01:19:35Z
DEP: Bump min version of dateutil to 2.7.3
diff --git a/ci/deps/azure-36-minimum_versions.yaml b/ci/deps/azure-36-minimum_versions.yaml index 0e0ebe5c75218..e553330b962a2 100644 --- a/ci/deps/azure-36-minimum_versions.yaml +++ b/ci/deps/azure-36-minimum_versions.yaml @@ -22,7 +22,7 @@ dependencies: - numpy=1.13.3 - openpyxl=2.5.7 - pytables=3.4.2 - - python-dateutil=2.6.1 + - python-dateutil=2.7.3 - pytz=2017.2 - scipy=0.19.0 - xlrd=1.1.0 diff --git a/ci/deps/azure-macos-36.yaml b/ci/deps/azure-macos-36.yaml index 279f44b06bd02..93885afbc4114 100644 --- a/ci/deps/azure-macos-36.yaml +++ b/ci/deps/azure-macos-36.yaml @@ -23,7 +23,7 @@ dependencies: - openpyxl - pyarrow>=0.13.0 - pytables - - python-dateutil==2.6.1 + - python-dateutil==2.7.3 - pytz - xarray - xlrd diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index bc1be527696a5..7fa2233e79fc0 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -221,7 +221,7 @@ Package Minimum support ================================================================ ========================== `setuptools <https://setuptools.readthedocs.io/en/latest/>`__ 24.2.0 `NumPy <https://www.numpy.org>`__ 1.13.3 -`python-dateutil <https://dateutil.readthedocs.io/en/stable/>`__ 2.6.1 +`python-dateutil <https://dateutil.readthedocs.io/en/stable/>`__ 2.7.3 `pytz <https://pypi.org/project/pytz/>`__ 2017.2 ================================================================ ========================== diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 584e21e87390d..2a89fe6b11f9a 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -94,6 +94,19 @@ Other enhancements .. --------------------------------------------------------------------------- +Increased minimum versions for dependencies +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Some minimum supported versions of dependencies were updated (:issue:`29766`, :issue:`29723`). 
+If installed, we now require: + ++-----------------+-----------------+----------+---------+ +| Package | Minimum Version | Required | Changed | ++=================+=================+==========+=========+ +| python-dateutil | 2.7.3 | X | | ++-----------------+-----------------+----------+---------+ + + Development Changes ^^^^^^^^^^^^^^^^^^^ diff --git a/environment.yml b/environment.yml index c874c5a8f68da..67b2df4dc5a0e 100644 --- a/environment.yml +++ b/environment.yml @@ -5,7 +5,7 @@ dependencies: # required - numpy>=1.15 - python=3 - - python-dateutil>=2.6.1 + - python-dateutil>=2.7.3 - pytz # benchmarks diff --git a/requirements-dev.txt b/requirements-dev.txt index ffbdfccced6a9..5cef428d35452 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -2,7 +2,7 @@ # See that file for comments about the need/usage of each dependency. numpy>=1.15 -python-dateutil>=2.6.1 +python-dateutil>=2.7.3 pytz asv cython>=0.29.16 diff --git a/setup.py b/setup.py index 338686bddd146..62c645bbb1465 100755 --- a/setup.py +++ b/setup.py @@ -747,7 +747,7 @@ def srcpath(name=None, suffix=".pyx", subdir="src"): def setup_package(): setuptools_kwargs = { "install_requires": [ - "python-dateutil >= 2.6.1", + "python-dateutil >= 2.7.3", "pytz >= 2017.2", f"numpy >= {min_numpy_ver}", ],
- [x] xref https://github.com/pandas-dev/pandas/pull/32465#discussion_r404449469 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry --- Not sure where to put the note in the "whatsnew"
https://api.github.com/repos/pandas-dev/pandas/pulls/33363
2020-04-07T11:08:22Z
2020-04-10T16:53:50Z
2020-04-10T16:53:50Z
2020-04-10T17:16:44Z
TST: iat with duplicate column names
diff --git a/pandas/tests/frame/indexing/test_iat.py b/pandas/tests/frame/indexing/test_iat.py index 23e3392251a3a..b1025b99e9bd5 100644 --- a/pandas/tests/frame/indexing/test_iat.py +++ b/pandas/tests/frame/indexing/test_iat.py @@ -1,3 +1,6 @@ +import pandas as pd + + def test_iat(float_frame): for i, row in enumerate(float_frame.index): @@ -5,3 +8,9 @@ def test_iat(float_frame): result = float_frame.iat[i, j] expected = float_frame.at[row, col] assert result == expected + + +def test_iat_duplicate_columns(): + # https://github.com/pandas-dev/pandas/issues/11754 + df = pd.DataFrame([[1, 2]], columns=["x", "x"]) + assert df.iat[0, 0] == 1
- [x] closes #11754 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33362
2020-04-07T10:34:43Z
2020-04-08T09:55:40Z
2020-04-08T09:55:39Z
2020-04-08T10:02:14Z
Fix read parquet import error message
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 46320355512d1..33747d2a6dd83 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -18,20 +18,23 @@ def get_engine(engine: str) -> "BaseImpl": if engine == "auto": # try engines in this order - try: - return PyArrowImpl() - except ImportError: - pass + engine_classes = [PyArrowImpl, FastParquetImpl] - try: - return FastParquetImpl() - except ImportError: - pass + error_msgs = "" + for engine_class in engine_classes: + try: + return engine_class() + except ImportError as err: + error_msgs += "\n - " + str(err) raise ImportError( "Unable to find a usable engine; " "tried using: 'pyarrow', 'fastparquet'.\n" - "pyarrow or fastparquet is required for parquet support" + "A suitable version of " + "pyarrow or fastparquet is required for parquet " + "support.\n" + "Trying to import the above resulted in these errors:" + f"{error_msgs}" ) if engine == "pyarrow": @@ -105,9 +108,7 @@ def write( **kwargs, ) else: - self.api.parquet.write_table( - table, path, compression=compression, **kwargs, - ) + self.api.parquet.write_table(table, path, compression=compression, **kwargs) def read(self, path, columns=None, **kwargs): path, _, _, should_close = get_filepath_or_buffer(path) diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index d0eaafb787222..94cf16c20e6c4 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -35,6 +35,7 @@ except ImportError: _HAVE_FASTPARQUET = False + pytestmark = pytest.mark.filterwarnings( "ignore:RangeIndex.* is deprecated:DeprecationWarning" ) @@ -223,6 +224,49 @@ def test_options_get_engine(fp, pa): assert isinstance(get_engine("fastparquet"), FastParquetImpl) +def test_get_engine_auto_error_message(): + # Expect different error messages from get_engine(engine="auto") + # if engines aren't installed vs. 
are installed but bad version + from pandas.compat._optional import VERSIONS + + # Do we have engines installed, but a bad version of them? + pa_min_ver = VERSIONS.get("pyarrow") + fp_min_ver = VERSIONS.get("fastparquet") + have_pa_bad_version = ( + False + if not _HAVE_PYARROW + else LooseVersion(pyarrow.__version__) < LooseVersion(pa_min_ver) + ) + have_fp_bad_version = ( + False + if not _HAVE_FASTPARQUET + else LooseVersion(fastparquet.__version__) < LooseVersion(fp_min_ver) + ) + # Do we have usable engines installed? + have_usable_pa = _HAVE_PYARROW and not have_pa_bad_version + have_usable_fp = _HAVE_FASTPARQUET and not have_fp_bad_version + + if not have_usable_pa and not have_usable_fp: + # No usable engines found. + if have_pa_bad_version: + match = f"Pandas requires version .{pa_min_ver}. or newer of .pyarrow." + with pytest.raises(ImportError, match=match): + get_engine("auto") + else: + match = "Missing optional dependency .pyarrow." + with pytest.raises(ImportError, match=match): + get_engine("auto") + + if have_fp_bad_version: + match = f"Pandas requires version .{fp_min_ver}. or newer of .fastparquet." + with pytest.raises(ImportError, match=match): + get_engine("auto") + else: + match = "Missing optional dependency .fastparquet." + with pytest.raises(ImportError, match=match): + get_engine("auto") + + def test_cross_engine_pa_fp(df_cross_compat, pa, fp): # cross-compat with differing reading/writing engines
- [x] closes #33313 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry N/A `pandas.io.parquet.get_engine()` uses handling of `ImportError`s for flow control to decide which parquet reader engine is used. In doing so, it quashed lower-level error messages that would have been helpful to the user attempting to diagnose the error, replacing it with a misleading error message. I refactored the error handling, allowing for these lower-level error messages to be collected and explicitly "bubbled up". Thus fixing the incorrect error message. No tests added -- this behaviour is not worthy of testing. Presumably not worthy of a whatsnew entry either.
https://api.github.com/repos/pandas-dev/pandas/pulls/33361
2020-04-07T09:46:52Z
2020-04-08T16:45:46Z
2020-04-08T16:45:46Z
2020-04-08T20:10:58Z
PERF: improve IntegerArray fast constructor
diff --git a/asv_bench/benchmarks/array.py b/asv_bench/benchmarks/array.py index 8cbf8c8592661..103df0fd94847 100644 --- a/asv_bench/benchmarks/array.py +++ b/asv_bench/benchmarks/array.py @@ -9,6 +9,11 @@ def setup(self): self.values_float = np.array([1.0, 0.0, 1.0, 0.0]) self.values_integer = np.array([1, 0, 1, 0]) self.values_integer_like = [1, 0, 1, 0] + self.data = np.array([True, False, True, False]) + self.mask = np.array([False, False, True, False]) + + def time_constructor(self): + pd.arrays.BooleanArray(self.data, self.mask) def time_from_bool_array(self): pd.array(self.values_bool, dtype="boolean") @@ -21,3 +26,16 @@ def time_from_integer_like(self): def time_from_float_array(self): pd.array(self.values_float, dtype="boolean") + + +class IntegerArray: + def setup(self): + self.values_integer = np.array([1, 0, 1, 0]) + self.data = np.array([1, 2, 3, 4], dtype="int64") + self.mask = np.array([False, False, True, False]) + + def time_constructor(self): + pd.arrays.IntegerArray(self.data, self.mask) + + def time_from_integer_array(self): + pd.array(self.values_integer, dtype="Int64") diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py index e85534def6b97..b78a10efa04a0 100644 --- a/pandas/core/arrays/boolean.py +++ b/pandas/core/arrays/boolean.py @@ -271,18 +271,8 @@ def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False): if not (isinstance(values, np.ndarray) and values.dtype == np.bool_): raise TypeError( "values should be boolean numpy array. Use " - "the 'array' function instead" + "the 'pd.array' function instead" ) - if not (isinstance(mask, np.ndarray) and mask.dtype == np.bool_): - raise TypeError( - "mask should be boolean numpy array. 
Use " - "the 'array' function instead" - ) - if not values.ndim == 1: - raise ValueError("values must be a 1D array") - if not mask.ndim == 1: - raise ValueError("mask must be a 1D array") - self._dtype = BooleanDtype() super().__init__(values, mask, copy=copy) diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index d47a396bbb14e..5d6f49852e696 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -343,15 +343,10 @@ def dtype(self) -> _IntegerDtype: return _dtypes[str(self._data.dtype)] def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False): - if not (isinstance(values, np.ndarray) and is_integer_dtype(values.dtype)): + if not (isinstance(values, np.ndarray) and values.dtype.kind in ["i", "u"]): raise TypeError( "values should be integer numpy array. Use " - "the 'integer_array' function instead" - ) - if not (isinstance(mask, np.ndarray) and is_bool_dtype(mask.dtype)): - raise TypeError( - "mask should be boolean numpy array. Use " - "the 'integer_array' function instead" + "the 'pd.array' function instead" ) super().__init__(values, mask, copy=copy) diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index d23d26d870f75..fc5b307bd5754 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -30,6 +30,17 @@ class BaseMaskedArray(ExtensionArray, ExtensionOpsMixin): _internal_fill_value: Scalar def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False): + # values is supposed to already be validated in the subclass + if not (isinstance(mask, np.ndarray) and mask.dtype == np.bool_): + raise TypeError( + "mask should be boolean numpy array. 
Use " + "the 'pd.array' function instead" + ) + if not values.ndim == 1: + raise ValueError("values must be a 1D array") + if not mask.ndim == 1: + raise ValueError("mask must be a 1D array") + if copy: values = values.copy() mask = mask.copy() diff --git a/pandas/tests/arrays/integer/test_construction.py b/pandas/tests/arrays/integer/test_construction.py index 4a62a35e23d93..43936d8b95bd6 100644 --- a/pandas/tests/arrays/integer/test_construction.py +++ b/pandas/tests/arrays/integer/test_construction.py @@ -70,7 +70,7 @@ def test_integer_array_constructor(): expected = integer_array([1, 2, 3, np.nan], dtype="int64") tm.assert_extension_array_equal(result, expected) - msg = r".* should be .* numpy array. Use the 'integer_array' function instead" + msg = r".* should be .* numpy array. Use the 'pd.array' function instead" with pytest.raises(TypeError, match=msg): IntegerArray(values.tolist(), mask)
Trying to improve the IntegerArray constructor from its constituents. Example test case: ``` a = pd.array([1, 2, 3], dtype="Int64") values = a._data mask = a._mask pd.arrays.IntegerArray(values, mask) ``` ``` In [5]: %timeit pd.arrays.IntegerArray(values, mask) 2.9 µs ± 45.4 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each) # master 775 ns ± 41.9 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each) # PR ``` So what is in the PR is *one possible* solution, to show the impact of using "general dtype checking functions" (that eg also need to deal with EA dtypes, getting the dtype of values being passed, etc). It is clear that here we can do a much more specialized check, and that this gives a considerable performance boost (in eg block/column-wise operations on IntegerArray, this is reconstructed many times). Some options: - do an inline specialized check as done here - define separate helper functions for those specialized checks (eg restricting to numpy dtypes) - improve performance of `is_bool_dtype` / `is_integer_dtype` for those cases (maybe putting a check like this as a "fastpath" at the beginning of those functions might avoid more costly checks done in those functions for this specific case (eg "is_bool_np_dtype") - simply don't do any validation at all in the IntegerArray constructor, and assume this is the responsibility of the caller - provide a keyword in the IntegerArray constructor to turn off validation - add a private constructor (similar as the `_simple_new` we have for others) that uses this fastpath For optimal performance, having a way to don't do any validation at all (so also don't check the passed values are ndarray, or their dtypes) would actually even be better than what this PR does. But I also like the clean constructor we have now (eg no separate private constructor)
https://api.github.com/repos/pandas-dev/pandas/pulls/33359
2020-04-07T09:07:13Z
2020-04-10T17:35:09Z
2020-04-10T17:35:09Z
2020-04-10T17:57:07Z
PERF: fastpath DataFrame constructor from BlockManager
diff --git a/pandas/core/apply.py b/pandas/core/apply.py index a0351cb687d02..a013434491589 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -166,9 +166,8 @@ def get_result(self): elif isinstance(self.f, np.ufunc): with np.errstate(all="ignore"): results = self.obj._mgr.apply("apply", func=self.f) - return self.obj._constructor( - data=results, index=self.index, columns=self.columns, copy=False - ) + # _constructor will retain self.index and self.columns + return self.obj._constructor(data=results) # broadcasting if self.result_type == "broadcast": diff --git a/pandas/core/frame.py b/pandas/core/frame.py index aedbba755227d..aa190c35c4c18 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -434,6 +434,11 @@ def __init__( data = data._mgr if isinstance(data, BlockManager): + if index is None and columns is None and dtype is None and copy is False: + # GH#33357 fastpath + NDFrame.__init__(self, data) + return + mgr = self._init_mgr( data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy ) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 5a7da0cfb29ab..056ee70b851ec 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -213,13 +213,13 @@ def __init__( object.__setattr__(self, "_attrs", attrs) @classmethod - def _init_mgr(cls, mgr, axes=None, dtype=None, copy=False): + def _init_mgr(cls, mgr, axes, dtype=None, copy: bool = False) -> BlockManager: """ passed a manager and a axes dict """ for a, axe in axes.items(): if axe is not None: - mgr = mgr.reindex_axis( - axe, axis=cls._get_block_manager_axis(a), copy=False - ) + axe = ensure_index(axe) + bm_axis = cls._get_block_manager_axis(a) + mgr = mgr.reindex_axis(axe, axis=bm_axis, copy=False) # make a copy if explicitly requested if copy: diff --git a/pandas/core/series.py b/pandas/core/series.py index c9684d0985173..2f4ca61a402dc 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -204,6 +204,17 @@ def __init__( self, data=None, 
index=None, dtype=None, name=None, copy=False, fastpath=False ): + if ( + isinstance(data, SingleBlockManager) + and index is None + and dtype is None + and copy is False + ): + # GH#33357 called with just the SingleBlockManager + NDFrame.__init__(self, data) + self.name = name + return + # we are called internally, so short-circuit if fastpath: @@ -827,9 +838,8 @@ def take(self, indices, axis=0, is_copy=None, **kwargs) -> "Series": new_index = self.index.take(indices) new_values = self._values.take(indices) - return self._constructor( - new_values, index=new_index, fastpath=True - ).__finalize__(self, method="take") + result = self._constructor(new_values, index=new_index, fastpath=True) + return result.__finalize__(self, method="take") def _take_with_is_copy(self, indices, axis=0): """
When trying to make `fast_apply` unnecessary I found that there is non-trivial overhead in the constructors that we can avoid. Moreover, since users shouldn't be passing BlockManagers around anyway, we _might_ be able to get that case out of the DataFrame constructor entirely. ``` In [2]: ser = pd.Series(range(5)) In [3]: df = ser.to_frame() In [4]: mgr = df._mgr In [5]: %timeit pd.DataFrame(mgr) 2.08 µs ± 51.4 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each) In [6]: %timeit pd.DataFrame._from_mgr(mgr) 1.26 µs ± 17.3 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/33357
2020-04-07T03:42:11Z
2020-04-09T17:56:37Z
2020-04-09T17:56:37Z
2020-04-09T18:03:02Z
BUG/REF: unstack with EA dtypes
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index aedbba755227d..041f47c529686 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -595,6 +595,16 @@ def _is_homogeneous_type(self) -> bool: else: return not self._mgr.is_mixed_type + @property + def _can_fast_transpose(self) -> bool: + """ + Can we transpose this DataFrame without creating any new array objects. + """ + if self._data.any_extension_types: + # TODO(EA2D) special case would be unnecessary with 2D EAs + return False + return len(self._data.blocks) == 1 + # ---------------------------------------------------------------------- # Rendering Methods diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index d9e0206d73b95..b883c5b1568a0 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -24,7 +24,6 @@ import pandas.core.algorithms as algos from pandas.core.arrays import SparseArray from pandas.core.arrays.categorical import factorize_from_iterable -from pandas.core.construction import extract_array from pandas.core.frame import DataFrame from pandas.core.indexes.api import Index, MultiIndex from pandas.core.series import Series @@ -413,7 +412,7 @@ def unstack(obj, level, fill_value=None): level = obj.index._get_level_number(level) if isinstance(obj, DataFrame): - if isinstance(obj.index, MultiIndex): + if isinstance(obj.index, MultiIndex) or not obj._can_fast_transpose: return _unstack_frame(obj, level, fill_value=fill_value) else: return obj.T.stack(dropna=False) @@ -429,14 +428,14 @@ def unstack(obj, level, fill_value=None): def _unstack_frame(obj, level, fill_value=None): - if obj._is_mixed_type: + if not obj._can_fast_transpose: unstacker = _Unstacker(obj.index, level=level) - blocks = obj._mgr.unstack(unstacker, fill_value=fill_value) - return obj._constructor(blocks) + mgr = obj._mgr.unstack(unstacker, fill_value=fill_value) + return obj._constructor(mgr) else: return _Unstacker( obj.index, level=level, 
constructor=obj._constructor, - ).get_result(obj.values, value_columns=obj.columns, fill_value=fill_value) + ).get_result(obj._values, value_columns=obj.columns, fill_value=fill_value) def _unstack_extension_series(series, level, fill_value): @@ -462,31 +461,10 @@ def _unstack_extension_series(series, level, fill_value): Each column of the DataFrame will have the same dtype as the input Series. """ - # Implementation note: the basic idea is to - # 1. Do a regular unstack on a dummy array of integers - # 2. Followup with a columnwise take. - # We use the dummy take to discover newly-created missing values - # introduced by the reshape. - from pandas.core.reshape.concat import concat - - dummy_arr = np.arange(len(series)) - # fill_value=-1, since we will do a series.values.take later - result = _Unstacker(series.index, level=level).get_result( - dummy_arr, value_columns=None, fill_value=-1 - ) - - out = [] - values = extract_array(series, extract_numpy=False) - - for col, indices in result.items(): - out.append( - Series( - values.take(indices.values, allow_fill=True, fill_value=fill_value), - name=col, - index=result.index, - ) - ) - return concat(out, axis="columns", copy=False, keys=result.columns) + # Defer to the logic in ExtensionBlock._unstack + df = series.to_frame() + result = df.unstack(level=level, fill_value=fill_value) + return result.droplevel(level=0, axis=1) def stack(frame, level=-1, dropna=True): diff --git a/pandas/tests/extension/base/casting.py b/pandas/tests/extension/base/casting.py index 3b1e896857117..f33f960e8e341 100644 --- a/pandas/tests/extension/base/casting.py +++ b/pandas/tests/extension/base/casting.py @@ -10,10 +10,22 @@ class BaseCastingTests(BaseExtensionTests): """Casting to and from ExtensionDtypes""" def test_astype_object_series(self, all_data): - ser = pd.Series({"A": all_data}) + ser = pd.Series(all_data, name="A") result = ser.astype(object) assert isinstance(result._mgr.blocks[0], ObjectBlock) + def 
test_astype_object_frame(self, all_data): + df = pd.DataFrame({"A": all_data}) + + result = df.astype(object) + blk = result._data.blocks[0] + assert isinstance(blk, ObjectBlock), type(blk) + + # FIXME: these currently fail; dont leave commented-out + # check that we can compare the dtypes + # cmp = result.dtypes.equals(df.dtypes) + # assert not cmp.any() + def test_tolist(self, data): result = pd.Series(data).tolist() expected = list(data) diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py index d2c3877de99ed..c9445ceec2c77 100644 --- a/pandas/tests/extension/base/reshaping.py +++ b/pandas/tests/extension/base/reshaping.py @@ -295,6 +295,14 @@ def test_unstack(self, data, index, obj): assert all( isinstance(result[col].array, type(data)) for col in result.columns ) + + if obj == "series": + # We should get the same result with to_frame+unstack+droplevel + df = ser.to_frame() + + alt = df.unstack(level=level).droplevel(0, axis=1) + self.assert_frame_equal(result, alt) + expected = ser.astype(object).unstack(level=level) result = result.astype(object) diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index 198a228b621b4..694bbee59606f 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -3,6 +3,8 @@ from pandas.errors import PerformanceWarning +from pandas.core.dtypes.common import is_object_dtype + import pandas as pd from pandas import SparseDtype import pandas._testing as tm @@ -309,7 +311,25 @@ def test_searchsorted(self, data_for_sorting, as_series): class TestCasting(BaseSparseTests, base.BaseCastingTests): - pass + def test_astype_object_series(self, all_data): + # Unlike the base class, we do not expect the resulting Block + # to be ObjectBlock + ser = pd.Series(all_data, name="A") + result = ser.astype(object) + assert is_object_dtype(result._data.blocks[0].dtype) + + def test_astype_object_frame(self, all_data): + # Unlike the 
base class, we do not expect the resulting Block + # to be ObjectBlock + df = pd.DataFrame({"A": all_data}) + + result = df.astype(object) + assert is_object_dtype(result._data.blocks[0].dtype) + + # FIXME: these currently fail; dont leave commented-out + # check that we can compare the dtypes + # comp = result.dtypes.equals(df.dtypes) + # assert not comp.any() class TestArithmeticOps(BaseSparseTests, base.BaseArithmeticOpsTests):
1) `ser.unstack(args)` should behave like `ser.to_frame().unstack(args).droplevel(level=0, axis=1)`. This fails on master, is fixed by this PR. 2) `reshape._unstack_extension_series` currently re-implements logic that is in `ExtensionBlock._unstack`. This de-duplicates by dispatching. 3) We currently transpose in cases where we shouldn't. This implements `DataFrame._can_fast_transpose` to avoid that. 4) `test_astype_object_series` has what looks like a typo that is making us test not-the-intended-thing; @TomAugspurger can you confirm im reading this right? 5) The dtype comparisons in `test_astype_object_frame` currently raise, are commented out for exposition, should be fixed in a separate PR.
https://api.github.com/repos/pandas-dev/pandas/pulls/33356
2020-04-07T03:22:44Z
2020-04-10T16:12:06Z
2020-04-10T16:12:06Z
2020-04-10T17:48:07Z
REF: collect Index constructor tests
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 0417208868314..1083f1c332705 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -148,17 +148,6 @@ def test_constructor_from_series_dtlike(self, index, has_tz): if has_tz: assert result.tz == index.tz - @pytest.mark.parametrize("klass", [Index, DatetimeIndex]) - def test_constructor_from_series(self, klass): - expected = DatetimeIndex( - [Timestamp("20110101"), Timestamp("20120101"), Timestamp("20130101")] - ) - s = Series( - [Timestamp("20110101"), Timestamp("20120101"), Timestamp("20130101")] - ) - result = klass(s) - tm.assert_index_equal(result, expected) - def test_constructor_from_series_freq(self): # GH 6273 # create from a series, passing a freq @@ -255,47 +244,6 @@ def test_index_ctor_infer_nan_nat(self, klass, dtype, na_val): result = Index(np.array(na_list)) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("pos", [0, 1]) - @pytest.mark.parametrize( - "klass,dtype,ctor", - [ - (pd.DatetimeIndex, "datetime64[ns]", np.datetime64("nat")), - (pd.TimedeltaIndex, "timedelta64[ns]", np.timedelta64("nat")), - ], - ) - def test_index_ctor_infer_nat_dt_like(self, pos, klass, dtype, ctor, nulls_fixture): - expected = klass([pd.NaT, pd.NaT]) - assert expected.dtype == dtype - data = [ctor] - data.insert(pos, nulls_fixture) - - if nulls_fixture is pd.NA: - expected = Index([pd.NA, pd.NaT]) - pytest.xfail("Broken with np.NaT ctor; see GH 31884") - - result = Index(data) - tm.assert_index_equal(result, expected) - - result = Index(np.array(data, dtype=object)) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize("swap_objs", [True, False]) - def test_index_ctor_nat_result(self, swap_objs): - # mixed np.datetime64/timedelta64 nat results in object - data = [np.datetime64("nat"), np.timedelta64("nat")] - if swap_objs: - data = data[::-1] - - expected = pd.Index(data, dtype=object) - 
tm.assert_index_equal(Index(data), expected) - tm.assert_index_equal(Index(np.array(data, dtype=object)), expected) - - def test_index_ctor_infer_periodindex(self): - xp = period_range("2012-1-1", freq="M", periods=3) - rs = Index(xp) - tm.assert_index_equal(rs, xp) - assert isinstance(rs, PeriodIndex) - @pytest.mark.parametrize( "vals,dtype", [ diff --git a/pandas/tests/indexes/test_index_new.py b/pandas/tests/indexes/test_index_new.py index 33f61de6a4ebf..87df5959e6221 100644 --- a/pandas/tests/indexes/test_index_new.py +++ b/pandas/tests/indexes/test_index_new.py @@ -6,7 +6,21 @@ from pandas.core.dtypes.common import is_unsigned_integer_dtype -from pandas import CategoricalIndex, Index, Int64Index, MultiIndex, UInt64Index +from pandas import ( + NA, + CategoricalIndex, + DatetimeIndex, + Index, + Int64Index, + MultiIndex, + NaT, + PeriodIndex, + Series, + TimedeltaIndex, + Timestamp, + UInt64Index, + period_range, +) import pandas._testing as tm @@ -53,3 +67,58 @@ def test_constructor_categorical_to_object(self): ci = CategoricalIndex(range(5)) result = Index(ci, dtype=object) assert not isinstance(result, CategoricalIndex) + + def test_constructor_infer_periodindex(self): + xp = period_range("2012-1-1", freq="M", periods=3) + rs = Index(xp) + tm.assert_index_equal(rs, xp) + assert isinstance(rs, PeriodIndex) + + @pytest.mark.parametrize("pos", [0, 1]) + @pytest.mark.parametrize( + "klass,dtype,ctor", + [ + (DatetimeIndex, "datetime64[ns]", np.datetime64("nat")), + (TimedeltaIndex, "timedelta64[ns]", np.timedelta64("nat")), + ], + ) + def test_constructor_infer_nat_dt_like( + self, pos, klass, dtype, ctor, nulls_fixture + ): + expected = klass([NaT, NaT]) + assert expected.dtype == dtype + data = [ctor] + data.insert(pos, nulls_fixture) + + if nulls_fixture is NA: + expected = Index([NA, NaT]) + pytest.xfail("Broken with np.NaT ctor; see GH 31884") + + result = Index(data) + tm.assert_index_equal(result, expected) + + result = Index(np.array(data, dtype=object)) 
+ tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("swap_objs", [True, False]) + def test_constructor_mixed_nat_objs_infers_object(self, swap_objs): + # mixed np.datetime64/timedelta64 nat results in object + data = [np.datetime64("nat"), np.timedelta64("nat")] + if swap_objs: + data = data[::-1] + + expected = Index(data, dtype=object) + tm.assert_index_equal(Index(data), expected) + tm.assert_index_equal(Index(np.array(data, dtype=object)), expected) + + +class TestIndexConstructorUnwrapping: + # Test passing different arraylike values to pd.Index + + @pytest.mark.parametrize("klass", [Index, DatetimeIndex]) + def test_constructor_from_series_dt64(self, klass): + stamps = [Timestamp("20110101"), Timestamp("20120101"), Timestamp("20130101")] + expected = DatetimeIndex(stamps) + ser = Series(stamps) + result = klass(ser) + tm.assert_index_equal(result, expected)
https://api.github.com/repos/pandas-dev/pandas/pulls/33354
2020-04-07T02:46:52Z
2020-04-07T19:18:47Z
2020-04-07T19:18:47Z
2020-04-07T19:32:02Z
TST: implement test_getitem for Series
diff --git a/pandas/tests/frame/methods/test_duplicated.py b/pandas/tests/frame/methods/test_duplicated.py index 34751b565a24b..82fd6d88b82b9 100644 --- a/pandas/tests/frame/methods/test_duplicated.py +++ b/pandas/tests/frame/methods/test_duplicated.py @@ -3,7 +3,7 @@ import numpy as np import pytest -from pandas import DataFrame, Series +from pandas import DataFrame, Series, date_range import pandas._testing as tm @@ -95,3 +95,15 @@ def test_duplicated_on_empty_frame(): result = df[dupes] expected = df.copy() tm.assert_frame_equal(result, expected) + + +def test_frame_datetime64_duplicated(): + dates = date_range("2010-07-01", end="2010-08-05") + + tst = DataFrame({"symbol": "AAA", "date": dates}) + result = tst.duplicated(["date", "symbol"]) + assert (-result).all() + + tst = DataFrame({"date": dates}) + result = tst.duplicated() + assert (-result).all() diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py index 18c11f2b9eb61..484b97baafebf 100644 --- a/pandas/tests/series/indexing/test_datetime.py +++ b/pandas/tests/series/indexing/test_datetime.py @@ -130,18 +130,6 @@ def test_slicing_datetimes(): tm.assert_frame_equal(result, expected) -def test_frame_datetime64_duplicated(): - dates = date_range("2010-07-01", end="2010-08-05") - - tst = DataFrame({"symbol": "AAA", "date": dates}) - result = tst.duplicated(["date", "symbol"]) - assert (-result).all() - - tst = DataFrame({"date": dates}) - result = tst.duplicated() - assert (-result).all() - - def test_getitem_setitem_datetime_tz_pytz(): from pytz import timezone as tz @@ -353,20 +341,6 @@ def test_getitem_setitem_periodindex(): tm.assert_series_equal(result, ts) -# FutureWarning from NumPy. 
-@pytest.mark.filterwarnings("ignore:Using a non-tuple:FutureWarning") -def test_getitem_median_slice_bug(): - index = date_range("20090415", "20090519", freq="2B") - s = Series(np.random.randn(13), index=index) - - indexer = [slice(6, 7, None)] - with tm.assert_produces_warning(FutureWarning): - # GH#31299 - result = s[indexer] - expected = s[indexer[0]] - tm.assert_series_equal(result, expected) - - def test_datetime_indexing(): index = date_range("1/1/2000", "1/7/2000") diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py new file mode 100644 index 0000000000000..a49bd6d59d01b --- /dev/null +++ b/pandas/tests/series/indexing/test_getitem.py @@ -0,0 +1,100 @@ +""" +Series.__getitem__ test classes are organized by the type of key passed. +""" +from datetime import datetime + +import numpy as np +import pytest + +from pandas._libs.tslibs import conversion, timezones + +import pandas as pd +from pandas import Series, Timestamp, date_range, period_range +import pandas._testing as tm + + +class TestSeriesGetitemScalars: + + # TODO: better name/GH ref? 
+ def test_getitem_regression(self): + ser = Series(range(5), index=list(range(5))) + result = ser[list(range(5))] + tm.assert_series_equal(result, ser) + + # ------------------------------------------------------------------ + # Series with DatetimeIndex + + @pytest.mark.parametrize("tzstr", ["Europe/Berlin", "dateutil/Europe/Berlin"]) + def test_getitem_pydatetime_tz(self, tzstr): + tz = timezones.maybe_get_tz(tzstr) + + index = date_range( + start="2012-12-24 16:00", end="2012-12-24 18:00", freq="H", tz=tzstr + ) + ts = Series(index=index, data=index.hour) + time_pandas = Timestamp("2012-12-24 17:00", tz=tzstr) + + dt = datetime(2012, 12, 24, 17, 0) + time_datetime = conversion.localize_pydatetime(dt, tz) + assert ts[time_pandas] == ts[time_datetime] + + @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) + def test_string_index_alias_tz_aware(self, tz): + rng = date_range("1/1/2000", periods=10, tz=tz) + ser = Series(np.random.randn(len(rng)), index=rng) + + result = ser["1/3/2000"] + tm.assert_almost_equal(result, ser[2]) + + +class TestSeriesGetitemSlices: + def test_getitem_slice_2d(self, datetime_series): + # GH#30588 multi-dimensional indexing deprecated + + # This is currently failing because the test was relying on + # the DeprecationWarning coming through Index.__getitem__. + # We want to implement a warning specifically for Series.__getitem__ + # at which point this will become a Deprecation/FutureWarning + with tm.assert_produces_warning(None): + # GH#30867 Don't want to support this long-term, but + # for now ensure that the warning from Index + # doesn't comes through via Series.__getitem__. + result = datetime_series[:, np.newaxis] + expected = datetime_series.values[:, np.newaxis] + tm.assert_almost_equal(result, expected) + + # FutureWarning from NumPy. 
+ @pytest.mark.filterwarnings("ignore:Using a non-tuple:FutureWarning") + def test_getitem_median_slice_bug(self): + index = date_range("20090415", "20090519", freq="2B") + s = Series(np.random.randn(13), index=index) + + indexer = [slice(6, 7, None)] + with tm.assert_produces_warning(FutureWarning): + # GH#31299 + result = s[indexer] + expected = s[indexer[0]] + tm.assert_series_equal(result, expected) + + +class TestSeriesGetitemListLike: + def test_getitem_intlist_intindex_periodvalues(self): + ser = Series(period_range("2000-01-01", periods=10, freq="D")) + + result = ser[[2, 4]] + exp = pd.Series( + [pd.Period("2000-01-03", freq="D"), pd.Period("2000-01-05", freq="D")], + index=[2, 4], + dtype="Period[D]", + ) + tm.assert_series_equal(result, exp) + assert result.dtype == "Period[D]" + + +def test_getitem_generator(string_series): + gen = (x > 0 for x in string_series) + result = string_series[gen] + result2 = string_series[iter(string_series > 0)] + expected = string_series[string_series > 0] + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result2, expected) diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index f7c7457f3a703..522ed4df96ad2 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -119,15 +119,6 @@ def test_getitem_fancy(string_series, object_series): assert object_series[2] == slice2[1] -def test_getitem_generator(string_series): - gen = (x > 0 for x in string_series) - result = string_series[gen] - result2 = string_series[iter(string_series > 0)] - expected = string_series[string_series > 0] - tm.assert_series_equal(result, expected) - tm.assert_series_equal(result2, expected) - - def test_type_promotion(): # GH12599 s = pd.Series(dtype=object) @@ -872,16 +863,6 @@ def test_uint_drop(any_int_dtype): tm.assert_series_equal(series, expected) -def test_getitem_2d_no_warning(): - # 
https://github.com/pandas-dev/pandas/issues/30867 - # Don't want to support this long-term, but - # for now ensure that the warning from Index - # doesn't comes through via Series.__getitem__. - series = pd.Series([1, 2, 3], index=[1, 2, 3]) - with tm.assert_produces_warning(None): - series[:, None] - - def test_getitem_unrecognized_scalar(): # GH#32684 a scalar key that is not recognized by lib.is_scalar diff --git a/pandas/tests/series/indexing/test_numeric.py b/pandas/tests/series/indexing/test_numeric.py index 5a648cc588957..b5bef46e95ec2 100644 --- a/pandas/tests/series/indexing/test_numeric.py +++ b/pandas/tests/series/indexing/test_numeric.py @@ -40,12 +40,6 @@ def test_getitem_negative_out_of_bounds(): s[-11] = "foo" -def test_getitem_regression(): - s = Series(range(5), index=list(range(5))) - result = s[list(range(5))] - tm.assert_series_equal(result, s) - - def test_getitem_setitem_slice_bug(): s = Series(range(10), index=list(range(10))) result = s[-12:] diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py index 5c2c1db14e70f..b54c09e5750fd 100644 --- a/pandas/tests/series/test_period.py +++ b/pandas/tests/series/test_period.py @@ -20,18 +20,6 @@ def test_auto_conversion(self): ) assert series.dtype == "Period[D]" - def test_getitem(self): - assert self.series[1] == pd.Period("2000-01-02", freq="D") - - result = self.series[[2, 4]] - exp = pd.Series( - [pd.Period("2000-01-03", freq="D"), pd.Period("2000-01-05", freq="D")], - index=[2, 4], - dtype="Period[D]", - ) - tm.assert_series_equal(result, exp) - assert result.dtype == "Period[D]" - def test_isna(self): # GH 13737 s = Series([pd.Period("2011-01", freq="M"), pd.Period("NaT", freq="M")]) diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index 6ca67b6cc8429..3c3108835416a 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -7,18 +7,6 @@ class TestTimeSeries: - def 
test_mpl_compat_hack(self, datetime_series): - - # This is currently failing because the test was relying on - # the DeprecationWarning coming through Index.__getitem__. - # We want to implement a warning specifically for Series.__getitem__ - # at which point this will become a Deprecation/FutureWarning - with tm.assert_produces_warning(None): - # GH#30588 multi-dimensional indexing deprecated - result = datetime_series[:, np.newaxis] - expected = datetime_series.values[:, np.newaxis] - tm.assert_almost_equal(result, expected) - def test_timeseries_coercion(self): idx = tm.makeDateIndex(10000) ser = Series(np.random.randn(len(idx)), idx.astype(object)) diff --git a/pandas/tests/series/test_timezones.py b/pandas/tests/series/test_timezones.py index dfff1d581fe44..05792dc4f00d2 100644 --- a/pandas/tests/series/test_timezones.py +++ b/pandas/tests/series/test_timezones.py @@ -7,9 +7,7 @@ import numpy as np import pytest -from pandas._libs.tslibs import conversion, timezones - -from pandas import Series, Timestamp +from pandas import Series import pandas._testing as tm from pandas.core.indexes.datetimes import date_range @@ -29,28 +27,6 @@ def test_dateutil_tzoffset_support(self): # it works! 
#2443 repr(series.index[0]) - @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) - def test_string_index_alias_tz_aware(self, tz): - rng = date_range("1/1/2000", periods=10, tz=tz) - ser = Series(np.random.randn(len(rng)), index=rng) - - result = ser["1/3/2000"] - tm.assert_almost_equal(result, ser[2]) - - @pytest.mark.parametrize("tzstr", ["Europe/Berlin", "dateutil/Europe/Berlin"]) - def test_getitem_pydatetime_tz(self, tzstr): - tz = timezones.maybe_get_tz(tzstr) - - index = date_range( - start="2012-12-24 16:00", end="2012-12-24 18:00", freq="H", tz=tzstr - ) - ts = Series(index=index, data=index.hour) - time_pandas = Timestamp("2012-12-24 17:00", tz=tzstr) - - dt = datetime(2012, 12, 24, 17, 0) - time_datetime = conversion.localize_pydatetime(dt, tz) - assert ts[time_pandas] == ts[time_datetime] - @pytest.mark.parametrize("copy", [True, False]) @pytest.mark.parametrize( "method, tz", [["tz_localize", None], ["tz_convert", "Europe/Berlin"]]
Lime with #33348, lots of scattered stuff is going to end up here
https://api.github.com/repos/pandas-dev/pandas/pulls/33353
2020-04-07T02:41:11Z
2020-04-07T16:27:29Z
2020-04-07T16:27:29Z
2020-04-07T16:43:46Z
CI: Add argument doc/source/development to formatting docstrings in code_checks.sh
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 1bdbbb54a0aac..0454150f61045 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -348,7 +348,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Validate correct capitalization among titles in documentation' ; echo $MSG - $BASE_DIR/scripts/validate_rst_title_capitalization.py $BASE_DIR/doc/source/development/contributing.rst $BASE_DIR/doc/source/reference + $BASE_DIR/scripts/validate_rst_title_capitalization.py $BASE_DIR/doc/source/development $BASE_DIR/doc/source/reference RET=$(($RET + $?)) ; echo $MSG "DONE" fi
Issue #32550
https://api.github.com/repos/pandas-dev/pandas/pulls/33352
2020-04-07T02:34:42Z
2020-04-07T12:28:53Z
2020-04-07T12:28:53Z
2020-04-07T20:21:12Z
ENH: Implement StringArray.min / max
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index cd1cb0b64f74a..0d7bb75b060e2 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -499,6 +499,7 @@ Strings ^^^^^^^ - Bug in the :meth:`~Series.astype` method when converting "string" dtype data to nullable integer dtype (:issue:`32450`). +- Fixed issue where taking ``min`` or ``max`` of a ``StringArray`` or ``Series`` with ``StringDtype`` type would raise. (:issue:`31746`) - Bug in :meth:`Series.str.cat` returning ``NaN`` output when other had :class:`Index` type (:issue:`33425`) diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py index 260cc69187d38..e988f5d97f7ee 100644 --- a/pandas/compat/numpy/function.py +++ b/pandas/compat/numpy/function.py @@ -218,7 +218,7 @@ def validate_cum_func_with_skipna(skipna, args, kwargs, name): LOGICAL_FUNC_DEFAULTS = dict(out=None, keepdims=False) validate_logical_func = CompatValidator(LOGICAL_FUNC_DEFAULTS, method="kwargs") -MINMAX_DEFAULTS = dict(out=None, keepdims=False) +MINMAX_DEFAULTS = dict(axis=None, out=None, keepdims=False) validate_min = CompatValidator( MINMAX_DEFAULTS, fname="min", method="both", max_fname_arg_count=1 ) diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index 3058e1d6073f3..e9950e0edaffb 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -5,6 +5,7 @@ from numpy.lib.mixins import NDArrayOperatorsMixin from pandas._libs import lib +from pandas._typing import Scalar from pandas.compat.numpy import function as nv from pandas.util._decorators import doc from pandas.util._validators import validate_fillna_kwargs @@ -17,6 +18,7 @@ from pandas import compat from pandas.core import nanops from pandas.core.algorithms import searchsorted, take, unique +from pandas.core.array_algos import masked_reductions from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin from pandas.core.construction import 
extract_array from pandas.core.indexers import check_array_indexer @@ -349,13 +351,19 @@ def all(self, axis=None, out=None, keepdims=False, skipna=True): nv.validate_all((), dict(out=out, keepdims=keepdims)) return nanops.nanall(self._ndarray, axis=axis, skipna=skipna) - def min(self, axis=None, out=None, keepdims=False, skipna=True): - nv.validate_min((), dict(out=out, keepdims=keepdims)) - return nanops.nanmin(self._ndarray, axis=axis, skipna=skipna) + def min(self, skipna: bool = True, **kwargs) -> Scalar: + nv.validate_min((), kwargs) + result = masked_reductions.min( + values=self.to_numpy(), mask=self.isna(), skipna=skipna + ) + return result - def max(self, axis=None, out=None, keepdims=False, skipna=True): - nv.validate_max((), dict(out=out, keepdims=keepdims)) - return nanops.nanmax(self._ndarray, axis=axis, skipna=skipna) + def max(self, skipna: bool = True, **kwargs) -> Scalar: + nv.validate_max((), kwargs) + result = masked_reductions.max( + values=self.to_numpy(), mask=self.isna(), skipna=skipna + ) + return result def sum( self, diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index dbca8e74f5e1b..51bbe182a002b 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -282,6 +282,9 @@ def astype(self, dtype, copy=True): return super().astype(dtype, copy) def _reduce(self, name, skipna=True, **kwargs): + if name in ["min", "max"]: + return getattr(self, name)(skipna=skipna) + raise TypeError(f"Cannot perform reduction '{name}' with string dtype") def value_counts(self, dropna=False): diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index fe770eed84b62..eb89798a1ad96 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -230,6 +230,32 @@ def test_reduce(skipna): assert result == "abc" +@pytest.mark.parametrize("method", ["min", "max"]) +@pytest.mark.parametrize("skipna", [True, False]) +def 
test_min_max(method, skipna): + arr = pd.Series(["a", "b", "c", None], dtype="string") + result = getattr(arr, method)(skipna=skipna) + if skipna: + expected = "a" if method == "min" else "c" + assert result == expected + else: + assert result is pd.NA + + +@pytest.mark.parametrize("method", ["min", "max"]) +@pytest.mark.parametrize( + "arr", + [ + pd.Series(["a", "b", "c", None], dtype="string"), + pd.array(["a", "b", "c", None], dtype="string"), + ], +) +def test_min_max_numpy(method, arr): + result = getattr(np, method)(arr) + expected = "a" if method == "min" else "c" + assert result == expected + + @pytest.mark.parametrize("skipna", [True, False]) @pytest.mark.xfail(reason="Not implemented StringArray.sum") def test_reduce_missing(skipna): diff --git a/pandas/tests/extension/test_string.py b/pandas/tests/extension/test_string.py index 86aed671f1b88..27a157d2127f6 100644 --- a/pandas/tests/extension/test_string.py +++ b/pandas/tests/extension/test_string.py @@ -77,7 +77,16 @@ class TestMissing(base.BaseMissingTests): class TestNoReduce(base.BaseNoReduceTests): - pass + @pytest.mark.parametrize("skipna", [True, False]) + def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna): + op_name = all_numeric_reductions + + if op_name in ["min", "max"]: + return None + + s = pd.Series(data) + with pytest.raises(TypeError): + getattr(s, op_name)(skipna=skipna) class TestMethods(base.BaseMethodsTests):
- [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Using the new masked reductions from @jorisvandenbossche to implement these for StringArray. Part of https://github.com/pandas-dev/pandas/issues/31746 but doesn't close because we're not adding sum here.
https://api.github.com/repos/pandas-dev/pandas/pulls/33351
2020-04-07T02:03:12Z
2020-04-25T08:01:51Z
2020-04-25T08:01:51Z
2020-04-25T13:31:46Z
REF: DatetimeIndex test_insert, test_delete
diff --git a/pandas/tests/indexes/datetimes/test_delete.py b/pandas/tests/indexes/datetimes/test_delete.py new file mode 100644 index 0000000000000..4fbb440bc89e5 --- /dev/null +++ b/pandas/tests/indexes/datetimes/test_delete.py @@ -0,0 +1,134 @@ +import pytest + +from pandas import DatetimeIndex, Series, date_range +import pandas._testing as tm + + +class TestDelete: + def test_delete(self): + idx = date_range(start="2000-01-01", periods=5, freq="M", name="idx") + + # preserve freq + expected_0 = date_range(start="2000-02-01", periods=4, freq="M", name="idx") + expected_4 = date_range(start="2000-01-01", periods=4, freq="M", name="idx") + + # reset freq to None + expected_1 = DatetimeIndex( + ["2000-01-31", "2000-03-31", "2000-04-30", "2000-05-31"], + freq=None, + name="idx", + ) + + cases = { + 0: expected_0, + -5: expected_0, + -1: expected_4, + 4: expected_4, + 1: expected_1, + } + for n, expected in cases.items(): + result = idx.delete(n) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + with pytest.raises((IndexError, ValueError), match="out of bounds"): + # either depending on numpy version + idx.delete(5) + + for tz in [None, "Asia/Tokyo", "US/Pacific"]: + idx = date_range( + start="2000-01-01 09:00", periods=10, freq="H", name="idx", tz=tz + ) + + expected = date_range( + start="2000-01-01 10:00", periods=9, freq="H", name="idx", tz=tz + ) + result = idx.delete(0) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freqstr == "H" + assert result.tz == expected.tz + + expected = date_range( + start="2000-01-01 09:00", periods=9, freq="H", name="idx", tz=tz + ) + result = idx.delete(-1) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freqstr == "H" + assert result.tz == expected.tz + + def test_delete_slice(self): + idx = date_range(start="2000-01-01", periods=10, freq="D", name="idx") + + # 
preserve freq + expected_0_2 = date_range(start="2000-01-04", periods=7, freq="D", name="idx") + expected_7_9 = date_range(start="2000-01-01", periods=7, freq="D", name="idx") + + # reset freq to None + expected_3_5 = DatetimeIndex( + [ + "2000-01-01", + "2000-01-02", + "2000-01-03", + "2000-01-07", + "2000-01-08", + "2000-01-09", + "2000-01-10", + ], + freq=None, + name="idx", + ) + + cases = { + (0, 1, 2): expected_0_2, + (7, 8, 9): expected_7_9, + (3, 4, 5): expected_3_5, + } + for n, expected in cases.items(): + result = idx.delete(n) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + result = idx.delete(slice(n[0], n[-1] + 1)) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + for tz in [None, "Asia/Tokyo", "US/Pacific"]: + ts = Series( + 1, + index=date_range( + "2000-01-01 09:00", periods=10, freq="H", name="idx", tz=tz + ), + ) + # preserve freq + result = ts.drop(ts.index[:5]).index + expected = date_range( + "2000-01-01 14:00", periods=5, freq="H", name="idx", tz=tz + ) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + assert result.tz == expected.tz + + # reset freq to None + result = ts.drop(ts.index[[1, 3, 5, 7, 9]]).index + expected = DatetimeIndex( + [ + "2000-01-01 09:00", + "2000-01-01 11:00", + "2000-01-01 13:00", + "2000-01-01 15:00", + "2000-01-01 17:00", + ], + freq=None, + name="idx", + tz=tz, + ) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + assert result.tz == expected.tz diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index 58e2afc869e02..c2f827bb516f0 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -2,7 +2,6 @@ import numpy as 
np import pytest -import pytz import pandas as pd from pandas import DatetimeIndex, Index, Timestamp, date_range, notna @@ -484,279 +483,6 @@ def test_index_dupes_contains(self): class TestDatetimeIndex: - @pytest.mark.parametrize( - "null", [None, np.nan, np.datetime64("NaT"), pd.NaT, pd.NA] - ) - @pytest.mark.parametrize("tz", [None, "UTC", "US/Eastern"]) - def test_insert_nat(self, tz, null): - # GH#16537, GH#18295 (test missing) - idx = pd.DatetimeIndex(["2017-01-01"], tz=tz) - expected = pd.DatetimeIndex(["NaT", "2017-01-01"], tz=tz) - res = idx.insert(0, null) - tm.assert_index_equal(res, expected) - - @pytest.mark.parametrize("tz", [None, "UTC", "US/Eastern"]) - def test_insert_invalid_na(self, tz): - idx = pd.DatetimeIndex(["2017-01-01"], tz=tz) - with pytest.raises(TypeError, match="incompatible label"): - idx.insert(0, np.timedelta64("NaT")) - - def test_insert(self): - idx = DatetimeIndex(["2000-01-04", "2000-01-01", "2000-01-02"], name="idx") - - result = idx.insert(2, datetime(2000, 1, 5)) - exp = DatetimeIndex( - ["2000-01-04", "2000-01-01", "2000-01-05", "2000-01-02"], name="idx" - ) - tm.assert_index_equal(result, exp) - - # insertion of non-datetime should coerce to object index - result = idx.insert(1, "inserted") - expected = Index( - [ - datetime(2000, 1, 4), - "inserted", - datetime(2000, 1, 1), - datetime(2000, 1, 2), - ], - name="idx", - ) - assert not isinstance(result, DatetimeIndex) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - - idx = date_range("1/1/2000", periods=3, freq="M", name="idx") - - # preserve freq - expected_0 = DatetimeIndex( - ["1999-12-31", "2000-01-31", "2000-02-29", "2000-03-31"], - name="idx", - freq="M", - ) - expected_3 = DatetimeIndex( - ["2000-01-31", "2000-02-29", "2000-03-31", "2000-04-30"], - name="idx", - freq="M", - ) - - # reset freq to None - expected_1_nofreq = DatetimeIndex( - ["2000-01-31", "2000-01-31", "2000-02-29", "2000-03-31"], - name="idx", - freq=None, - ) - 
expected_3_nofreq = DatetimeIndex( - ["2000-01-31", "2000-02-29", "2000-03-31", "2000-01-02"], - name="idx", - freq=None, - ) - - cases = [ - (0, datetime(1999, 12, 31), expected_0), - (-3, datetime(1999, 12, 31), expected_0), - (3, datetime(2000, 4, 30), expected_3), - (1, datetime(2000, 1, 31), expected_1_nofreq), - (3, datetime(2000, 1, 2), expected_3_nofreq), - ] - - for n, d, expected in cases: - result = idx.insert(n, d) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.freq == expected.freq - - # reset freq to None - result = idx.insert(3, datetime(2000, 1, 2)) - expected = DatetimeIndex( - ["2000-01-31", "2000-02-29", "2000-03-31", "2000-01-02"], - name="idx", - freq=None, - ) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.freq is None - - # see gh-7299 - idx = date_range("1/1/2000", periods=3, freq="D", tz="Asia/Tokyo", name="idx") - with pytest.raises(TypeError, match="Cannot compare tz-naive and tz-aware"): - idx.insert(3, pd.Timestamp("2000-01-04")) - with pytest.raises(TypeError, match="Cannot compare tz-naive and tz-aware"): - idx.insert(3, datetime(2000, 1, 4)) - with pytest.raises(ValueError, match="Timezones don't match"): - idx.insert(3, pd.Timestamp("2000-01-04", tz="US/Eastern")) - with pytest.raises(ValueError, match="Timezones don't match"): - idx.insert(3, datetime(2000, 1, 4, tzinfo=pytz.timezone("US/Eastern"))) - - for tz in ["US/Pacific", "Asia/Singapore"]: - idx = date_range("1/1/2000 09:00", periods=6, freq="H", tz=tz, name="idx") - # preserve freq - expected = date_range( - "1/1/2000 09:00", periods=7, freq="H", tz=tz, name="idx" - ) - for d in [ - pd.Timestamp("2000-01-01 15:00", tz=tz), - pytz.timezone(tz).localize(datetime(2000, 1, 1, 15)), - ]: - - result = idx.insert(6, d) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.freq == expected.freq - assert result.tz == expected.tz - - expected = 
DatetimeIndex( - [ - "2000-01-01 09:00", - "2000-01-01 10:00", - "2000-01-01 11:00", - "2000-01-01 12:00", - "2000-01-01 13:00", - "2000-01-01 14:00", - "2000-01-01 10:00", - ], - name="idx", - tz=tz, - freq=None, - ) - # reset freq to None - for d in [ - pd.Timestamp("2000-01-01 10:00", tz=tz), - pytz.timezone(tz).localize(datetime(2000, 1, 1, 10)), - ]: - result = idx.insert(6, d) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.tz == expected.tz - assert result.freq is None - - def test_delete(self): - idx = date_range(start="2000-01-01", periods=5, freq="M", name="idx") - - # preserve freq - expected_0 = date_range(start="2000-02-01", periods=4, freq="M", name="idx") - expected_4 = date_range(start="2000-01-01", periods=4, freq="M", name="idx") - - # reset freq to None - expected_1 = DatetimeIndex( - ["2000-01-31", "2000-03-31", "2000-04-30", "2000-05-31"], - freq=None, - name="idx", - ) - - cases = { - 0: expected_0, - -5: expected_0, - -1: expected_4, - 4: expected_4, - 1: expected_1, - } - for n, expected in cases.items(): - result = idx.delete(n) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.freq == expected.freq - - with pytest.raises((IndexError, ValueError), match="out of bounds"): - # either depending on numpy version - idx.delete(5) - - for tz in [None, "Asia/Tokyo", "US/Pacific"]: - idx = date_range( - start="2000-01-01 09:00", periods=10, freq="H", name="idx", tz=tz - ) - - expected = date_range( - start="2000-01-01 10:00", periods=9, freq="H", name="idx", tz=tz - ) - result = idx.delete(0) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.freqstr == "H" - assert result.tz == expected.tz - - expected = date_range( - start="2000-01-01 09:00", periods=9, freq="H", name="idx", tz=tz - ) - result = idx.delete(-1) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert 
result.freqstr == "H" - assert result.tz == expected.tz - - def test_delete_slice(self): - idx = date_range(start="2000-01-01", periods=10, freq="D", name="idx") - - # preserve freq - expected_0_2 = date_range(start="2000-01-04", periods=7, freq="D", name="idx") - expected_7_9 = date_range(start="2000-01-01", periods=7, freq="D", name="idx") - - # reset freq to None - expected_3_5 = DatetimeIndex( - [ - "2000-01-01", - "2000-01-02", - "2000-01-03", - "2000-01-07", - "2000-01-08", - "2000-01-09", - "2000-01-10", - ], - freq=None, - name="idx", - ) - - cases = { - (0, 1, 2): expected_0_2, - (7, 8, 9): expected_7_9, - (3, 4, 5): expected_3_5, - } - for n, expected in cases.items(): - result = idx.delete(n) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.freq == expected.freq - - result = idx.delete(slice(n[0], n[-1] + 1)) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.freq == expected.freq - - for tz in [None, "Asia/Tokyo", "US/Pacific"]: - ts = pd.Series( - 1, - index=pd.date_range( - "2000-01-01 09:00", periods=10, freq="H", name="idx", tz=tz - ), - ) - # preserve freq - result = ts.drop(ts.index[:5]).index - expected = pd.date_range( - "2000-01-01 14:00", periods=5, freq="H", name="idx", tz=tz - ) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.freq == expected.freq - assert result.tz == expected.tz - - # reset freq to None - result = ts.drop(ts.index[[1, 3, 5, 7, 9]]).index - expected = DatetimeIndex( - [ - "2000-01-01 09:00", - "2000-01-01 11:00", - "2000-01-01 13:00", - "2000-01-01 15:00", - "2000-01-01 17:00", - ], - freq=None, - name="idx", - tz=tz, - ) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.freq == expected.freq - assert result.tz == expected.tz - def test_get_value(self): # specifically make sure we have test for np.datetime64 key dti = 
pd.date_range("2016-01-01", periods=3) diff --git a/pandas/tests/indexes/datetimes/test_insert.py b/pandas/tests/indexes/datetimes/test_insert.py new file mode 100644 index 0000000000000..4abb4f0006444 --- /dev/null +++ b/pandas/tests/indexes/datetimes/test_insert.py @@ -0,0 +1,153 @@ +from datetime import datetime + +import numpy as np +import pytest +import pytz + +from pandas import NA, DatetimeIndex, Index, NaT, Timestamp, date_range +import pandas._testing as tm + + +class TestInsert: + @pytest.mark.parametrize("null", [None, np.nan, np.datetime64("NaT"), NaT, NA]) + @pytest.mark.parametrize("tz", [None, "UTC", "US/Eastern"]) + def test_insert_nat(self, tz, null): + # GH#16537, GH#18295 (test missing) + idx = DatetimeIndex(["2017-01-01"], tz=tz) + expected = DatetimeIndex(["NaT", "2017-01-01"], tz=tz) + res = idx.insert(0, null) + tm.assert_index_equal(res, expected) + + @pytest.mark.parametrize("tz", [None, "UTC", "US/Eastern"]) + def test_insert_invalid_na(self, tz): + idx = DatetimeIndex(["2017-01-01"], tz=tz) + with pytest.raises(TypeError, match="incompatible label"): + idx.insert(0, np.timedelta64("NaT")) + + def test_insert(self): + idx = DatetimeIndex(["2000-01-04", "2000-01-01", "2000-01-02"], name="idx") + + result = idx.insert(2, datetime(2000, 1, 5)) + exp = DatetimeIndex( + ["2000-01-04", "2000-01-01", "2000-01-05", "2000-01-02"], name="idx" + ) + tm.assert_index_equal(result, exp) + + # insertion of non-datetime should coerce to object index + result = idx.insert(1, "inserted") + expected = Index( + [ + datetime(2000, 1, 4), + "inserted", + datetime(2000, 1, 1), + datetime(2000, 1, 2), + ], + name="idx", + ) + assert not isinstance(result, DatetimeIndex) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + + idx = date_range("1/1/2000", periods=3, freq="M", name="idx") + + # preserve freq + expected_0 = DatetimeIndex( + ["1999-12-31", "2000-01-31", "2000-02-29", "2000-03-31"], + name="idx", + freq="M", + ) + 
expected_3 = DatetimeIndex( + ["2000-01-31", "2000-02-29", "2000-03-31", "2000-04-30"], + name="idx", + freq="M", + ) + + # reset freq to None + expected_1_nofreq = DatetimeIndex( + ["2000-01-31", "2000-01-31", "2000-02-29", "2000-03-31"], + name="idx", + freq=None, + ) + expected_3_nofreq = DatetimeIndex( + ["2000-01-31", "2000-02-29", "2000-03-31", "2000-01-02"], + name="idx", + freq=None, + ) + + cases = [ + (0, datetime(1999, 12, 31), expected_0), + (-3, datetime(1999, 12, 31), expected_0), + (3, datetime(2000, 4, 30), expected_3), + (1, datetime(2000, 1, 31), expected_1_nofreq), + (3, datetime(2000, 1, 2), expected_3_nofreq), + ] + + for n, d, expected in cases: + result = idx.insert(n, d) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + # reset freq to None + result = idx.insert(3, datetime(2000, 1, 2)) + expected = DatetimeIndex( + ["2000-01-31", "2000-02-29", "2000-03-31", "2000-01-02"], + name="idx", + freq=None, + ) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq is None + + # see gh-7299 + idx = date_range("1/1/2000", periods=3, freq="D", tz="Asia/Tokyo", name="idx") + with pytest.raises(TypeError, match="Cannot compare tz-naive and tz-aware"): + idx.insert(3, Timestamp("2000-01-04")) + with pytest.raises(TypeError, match="Cannot compare tz-naive and tz-aware"): + idx.insert(3, datetime(2000, 1, 4)) + with pytest.raises(ValueError, match="Timezones don't match"): + idx.insert(3, Timestamp("2000-01-04", tz="US/Eastern")) + with pytest.raises(ValueError, match="Timezones don't match"): + idx.insert(3, datetime(2000, 1, 4, tzinfo=pytz.timezone("US/Eastern"))) + + for tz in ["US/Pacific", "Asia/Singapore"]: + idx = date_range("1/1/2000 09:00", periods=6, freq="H", tz=tz, name="idx") + # preserve freq + expected = date_range( + "1/1/2000 09:00", periods=7, freq="H", tz=tz, name="idx" + ) + for d in [ + Timestamp("2000-01-01 15:00", 
tz=tz), + pytz.timezone(tz).localize(datetime(2000, 1, 1, 15)), + ]: + + result = idx.insert(6, d) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + assert result.tz == expected.tz + + expected = DatetimeIndex( + [ + "2000-01-01 09:00", + "2000-01-01 10:00", + "2000-01-01 11:00", + "2000-01-01 12:00", + "2000-01-01 13:00", + "2000-01-01 14:00", + "2000-01-01 10:00", + ], + name="idx", + tz=tz, + freq=None, + ) + # reset freq to None + for d in [ + Timestamp("2000-01-01 10:00", tz=tz), + pytz.timezone(tz).localize(datetime(2000, 1, 1, 10)), + ]: + result = idx.insert(6, d) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.tz == expected.tz + assert result.freq is None
https://api.github.com/repos/pandas-dev/pandas/pulls/33350
2020-04-07T01:58:09Z
2020-04-07T20:57:05Z
2020-04-07T20:57:05Z
2020-04-07T21:09:55Z
REF: misplaced tests in test_partial_slicing files
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index 58e2afc869e02..0e9af273823cb 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -25,6 +25,13 @@ def test_ellipsis(self): assert result.equals(idx) assert result is not idx + def test_getitem_slice_keeps_name(self): + # GH4226 + st = pd.Timestamp("2013-07-01 00:00:00", tz="America/Los_Angeles") + et = pd.Timestamp("2013-07-02 00:00:00", tz="America/Los_Angeles") + dr = pd.date_range(st, et, freq="H", name="timebucket") + assert dr[1:].name == dr.name + def test_getitem(self): idx1 = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx") idx2 = pd.date_range( @@ -119,6 +126,21 @@ def test_dti_custom_getitem_matplotlib_hackaround(self): expected = rng.values[:, None] tm.assert_numpy_array_equal(values, expected) + def test_getitem_int_list(self): + dti = date_range(start="1/1/2005", end="12/1/2005", freq="M") + dti2 = dti[[1, 3, 5]] + + v1 = dti2[0] + v2 = dti2[1] + v3 = dti2[2] + + assert v1 == Timestamp("2/28/2005") + assert v2 == Timestamp("4/30/2005") + assert v3 == Timestamp("6/30/2005") + + # getitem with non-slice drops freq + assert dti2.freq is None + class TestWhere: def test_where_doesnt_retain_freq(self): @@ -483,6 +505,69 @@ def test_index_dupes_contains(self): assert d in ix +class TestGetIndexer: + def test_get_indexer(self): + idx = pd.date_range("2000-01-01", periods=3) + exp = np.array([0, 1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(idx.get_indexer(idx), exp) + + target = idx[0] + pd.to_timedelta(["-1 hour", "12 hours", "1 day 1 hour"]) + tm.assert_numpy_array_equal( + idx.get_indexer(target, "pad"), np.array([-1, 0, 1], dtype=np.intp) + ) + tm.assert_numpy_array_equal( + idx.get_indexer(target, "backfill"), np.array([0, 1, 2], dtype=np.intp) + ) + tm.assert_numpy_array_equal( + idx.get_indexer(target, "nearest"), np.array([0, 1, 1], dtype=np.intp) + ) 
+ tm.assert_numpy_array_equal( + idx.get_indexer(target, "nearest", tolerance=pd.Timedelta("1 hour")), + np.array([0, -1, 1], dtype=np.intp), + ) + tol_raw = [ + pd.Timedelta("1 hour"), + pd.Timedelta("1 hour"), + pd.Timedelta("1 hour").to_timedelta64(), + ] + tm.assert_numpy_array_equal( + idx.get_indexer( + target, "nearest", tolerance=[np.timedelta64(x) for x in tol_raw] + ), + np.array([0, -1, 1], dtype=np.intp), + ) + tol_bad = [ + pd.Timedelta("2 hour").to_timedelta64(), + pd.Timedelta("1 hour").to_timedelta64(), + "foo", + ] + with pytest.raises(ValueError, match="abbreviation w/o a number"): + idx.get_indexer(target, "nearest", tolerance=tol_bad) + with pytest.raises(ValueError, match="abbreviation w/o a number"): + idx.get_indexer(idx[[0]], method="nearest", tolerance="foo") + + +class TestMaybeCastSliceBound: + def test_maybe_cast_slice_bounds_empty(self): + # GH#14354 + empty_idx = date_range(freq="1H", periods=0, end="2015") + + right = empty_idx._maybe_cast_slice_bound("2015-01-02", "right", "loc") + exp = Timestamp("2015-01-02 23:59:59.999999999") + assert right == exp + + left = empty_idx._maybe_cast_slice_bound("2015-01-02", "left", "loc") + exp = Timestamp("2015-01-02 00:00:00") + assert left == exp + + def test_maybe_cast_slice_duplicate_monotonic(self): + # https://github.com/pandas-dev/pandas/issues/16515 + idx = DatetimeIndex(["2017", "2017"]) + result = idx._maybe_cast_slice_bound("2017-01-01", "left", "loc") + expected = Timestamp("2017-01-01") + assert result == expected + + class TestDatetimeIndex: @pytest.mark.parametrize( "null", [None, np.nan, np.datetime64("NaT"), pd.NaT, pd.NA] @@ -777,43 +862,3 @@ def test_get_value(self): result = dti.get_value(ser, key.to_datetime64()) assert result == 7 - - def test_get_indexer(self): - idx = pd.date_range("2000-01-01", periods=3) - exp = np.array([0, 1, 2], dtype=np.intp) - tm.assert_numpy_array_equal(idx.get_indexer(idx), exp) - - target = idx[0] + pd.to_timedelta(["-1 hour", "12 hours", "1 day 1 
hour"]) - tm.assert_numpy_array_equal( - idx.get_indexer(target, "pad"), np.array([-1, 0, 1], dtype=np.intp) - ) - tm.assert_numpy_array_equal( - idx.get_indexer(target, "backfill"), np.array([0, 1, 2], dtype=np.intp) - ) - tm.assert_numpy_array_equal( - idx.get_indexer(target, "nearest"), np.array([0, 1, 1], dtype=np.intp) - ) - tm.assert_numpy_array_equal( - idx.get_indexer(target, "nearest", tolerance=pd.Timedelta("1 hour")), - np.array([0, -1, 1], dtype=np.intp), - ) - tol_raw = [ - pd.Timedelta("1 hour"), - pd.Timedelta("1 hour"), - pd.Timedelta("1 hour").to_timedelta64(), - ] - tm.assert_numpy_array_equal( - idx.get_indexer( - target, "nearest", tolerance=[np.timedelta64(x) for x in tol_raw] - ), - np.array([0, -1, 1], dtype=np.intp), - ) - tol_bad = [ - pd.Timedelta("2 hour").to_timedelta64(), - pd.Timedelta("1 hour").to_timedelta64(), - "foo", - ] - with pytest.raises(ValueError, match="abbreviation w/o a number"): - idx.get_indexer(target, "nearest", tolerance=tol_bad) - with pytest.raises(ValueError, match="abbreviation w/o a number"): - idx.get_indexer(idx[[0]], method="nearest", tolerance="foo") diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py index 946d658e90132..ddde30d0f8fbf 100644 --- a/pandas/tests/indexes/datetimes/test_partial_slicing.py +++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -21,28 +21,6 @@ class TestSlicing: - def test_dti_slicing(self): - dti = date_range(start="1/1/2005", end="12/1/2005", freq="M") - dti2 = dti[[1, 3, 5]] - - v1 = dti2[0] - v2 = dti2[1] - v3 = dti2[2] - - assert v1 == Timestamp("2/28/2005") - assert v2 == Timestamp("4/30/2005") - assert v3 == Timestamp("6/30/2005") - - # don't carry freq through irregular slicing - assert dti2.freq is None - - def test_slice_keeps_name(self): - # GH4226 - st = pd.Timestamp("2013-07-01 00:00:00", tz="America/Los_Angeles") - et = pd.Timestamp("2013-07-02 00:00:00", tz="America/Los_Angeles") - dr 
= pd.date_range(st, et, freq="H", name="timebucket") - assert dr[1:].name == dr.name - def test_slice_with_negative_step(self): ts = Series(np.arange(20), date_range("2014-01-01", periods=20, freq="MS")) SLC = pd.IndexSlice @@ -80,25 +58,6 @@ def test_slice_with_zero_step_raises(self): with pytest.raises(ValueError, match="slice step cannot be zero"): ts.loc[::0] - def test_slice_bounds_empty(self): - # GH#14354 - empty_idx = date_range(freq="1H", periods=0, end="2015") - - right = empty_idx._maybe_cast_slice_bound("2015-01-02", "right", "loc") - exp = Timestamp("2015-01-02 23:59:59.999999999") - assert right == exp - - left = empty_idx._maybe_cast_slice_bound("2015-01-02", "left", "loc") - exp = Timestamp("2015-01-02 00:00:00") - assert left == exp - - def test_slice_duplicate_monotonic(self): - # https://github.com/pandas-dev/pandas/issues/16515 - idx = pd.DatetimeIndex(["2017", "2017"]) - result = idx._maybe_cast_slice_bound("2017-01-01", "left", "loc") - expected = Timestamp("2017-01-01") - assert result == expected - def test_monotone_DTI_indexing_bug(self): # GH 19362 # Testing accessing the first element in a monotonic descending diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index 39688e5b92380..c4aaf6332ba15 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -31,6 +31,10 @@ def test_ellipsis(self): assert result.equals(idx) assert result is not idx + def test_getitem_slice_keeps_name(self): + idx = period_range("20010101", periods=10, freq="D", name="bob") + assert idx.name == idx[1:].name + def test_getitem(self): idx1 = period_range("2011-01-01", "2011-01-31", freq="D", name="idx") diff --git a/pandas/tests/indexes/period/test_partial_slicing.py b/pandas/tests/indexes/period/test_partial_slicing.py index c0597180184a6..ad9ee7bd2594d 100644 --- a/pandas/tests/indexes/period/test_partial_slicing.py +++ 
b/pandas/tests/indexes/period/test_partial_slicing.py @@ -40,10 +40,6 @@ def test_slice_with_zero_step_raises(self): with pytest.raises(ValueError, match="slice step cannot be zero"): ts.loc[::0] - def test_slice_keep_name(self): - idx = period_range("20010101", periods=10, freq="D", name="bob") - assert idx.name == idx[1:].name - def test_pindex_slice_index(self): pi = period_range(start="1/1/10", end="12/31/12", freq="M") s = Series(np.random.rand(len(pi)), index=pi) diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py index 72d7763b549e7..8c39a9c40a69b 100644 --- a/pandas/tests/indexes/timedeltas/test_indexing.py +++ b/pandas/tests/indexes/timedeltas/test_indexing.py @@ -18,6 +18,11 @@ def test_ellipsis(self): assert result.equals(idx) assert result is not idx + def test_getitem_slice_keeps_name(self): + # GH#4226 + tdi = timedelta_range("1d", "5d", freq="H", name="timebucket") + assert tdi[1:].name == tdi.name + def test_getitem(self): idx1 = timedelta_range("1 day", "31 day", freq="D", name="idx") diff --git a/pandas/tests/indexes/timedeltas/test_partial_slicing.py b/pandas/tests/indexes/timedeltas/test_partial_slicing.py index 29e2c7dd20be0..a0ef953db3600 100644 --- a/pandas/tests/indexes/timedeltas/test_partial_slicing.py +++ b/pandas/tests/indexes/timedeltas/test_partial_slicing.py @@ -7,11 +7,6 @@ class TestSlicing: - def test_slice_keeps_name(self): - # GH4226 - dr = pd.timedelta_range("1d", "5d", freq="H", name="timebucket") - assert dr[1:].name == dr.name - def test_partial_slice(self): rng = timedelta_range("1 day 10:11:12", freq="h", periods=500) s = Series(np.arange(len(rng)), index=rng)
Whats left in these after this are all testing Series methods, so belong elsewhere.
https://api.github.com/repos/pandas-dev/pandas/pulls/33349
2020-04-07T01:49:47Z
2020-04-07T19:09:24Z
2020-04-07T19:09:24Z
2020-04-07T19:13:10Z
TST: implement test_setitem for Series
diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index 58e2afc869e02..0df8f7f47f617 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -477,7 +477,7 @@ def test_get_loc_reasonable_key_error(self): class TestContains: - def test_index_dupes_contains(self): + def test_dti_contains_with_duplicates(self): d = datetime(2011, 12, 5, 20, 30) ix = DatetimeIndex([d, d]) assert d in ix diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py index 18c11f2b9eb61..cf91d1fed58c4 100644 --- a/pandas/tests/series/indexing/test_datetime.py +++ b/pandas/tests/series/indexing/test_datetime.py @@ -684,21 +684,6 @@ def test_indexing(): """ -def test_set_none_nan(): - series = Series(date_range("1/1/2000", periods=10)) - series[3] = None - assert series[3] is NaT - - series[3:5] = None - assert series[4] is NaT - - series[5] = np.nan - assert series[5] is NaT - - series[5:7] = np.nan - assert series[6] is NaT - - def test_setitem_tuple_with_datetimetz(): # GH 20441 arr = date_range("2017", periods=4, tz="US/Eastern") diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py new file mode 100644 index 0000000000000..3463de25ad91b --- /dev/null +++ b/pandas/tests/series/indexing/test_setitem.py @@ -0,0 +1,19 @@ +import numpy as np + +from pandas import NaT, Series, date_range + + +class TestSetitemDT64Values: + def test_setitem_none_nan(self): + series = Series(date_range("1/1/2000", periods=10)) + series[3] = None + assert series[3] is NaT + + series[3:5] = None + assert series[4] is NaT + + series[5] = np.nan + assert series[5] is NaT + + series[5:7] = np.nan + assert series[6] is NaT
Lots of scattered tests that belong here. Also lots of scattered local branches.
https://api.github.com/repos/pandas-dev/pandas/pulls/33348
2020-04-07T01:44:55Z
2020-04-07T16:24:08Z
2020-04-07T16:24:08Z
2020-04-07T16:44:26Z
REF: remove BlockManager.set
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 2878204f5ee79..e1beeff3f2005 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3217,7 +3217,8 @@ def _maybe_cache_changed(self, item, value) -> None: """ The object has called back to us saying maybe it has changed. """ - self._mgr.set(item, value) + loc = self._info_axis.get_loc(item) + self._mgr.iset(loc, value) @property def _is_cached(self) -> bool_t: @@ -3594,8 +3595,14 @@ def _iset_item(self, loc: int, value) -> None: self._clear_item_cache() def _set_item(self, key, value) -> None: - self._mgr.set(key, value) - self._clear_item_cache() + try: + loc = self._info_axis.get_loc(key) + except KeyError: + # This item wasn't present, just insert at end + self._mgr.insert(len(self._info_axis), key, value) + return + + NDFrame._iset_item(self, loc, value) def _set_is_copy(self, ref, copy: bool_t = True) -> None: if not copy: diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 80ba9cdb29916..dd950c0276646 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -983,24 +983,6 @@ def idelete(self, indexer): ) self._rebuild_blknos_and_blklocs() - def set(self, item: Label, value): - """ - Set new item in-place. - - Notes - ----- - Does not consolidate. - Adds new Block if not contained in the current items Index. - """ - try: - loc = self.items.get_loc(item) - except KeyError: - # This item wasn't present, just insert at end - self.insert(len(self.items), item, value) - return - - self.iset(loc, value) - def iset(self, loc: Union[int, slice, np.ndarray], value): """ Set new item in-place. Does not consolidate. 
Adds new Block if not diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index f1d4c865a0ced..b2239c077bd69 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -342,8 +342,8 @@ def test_iget(self): def test_set(self): mgr = create_mgr("a,b,c: int", item_shape=(3,)) - mgr.set("d", np.array(["foo"] * 3)) - mgr.set("b", np.array(["bar"] * 3)) + mgr.insert(len(mgr.items), "d", np.array(["foo"] * 3)) + mgr.iset(1, np.array(["bar"] * 3)) tm.assert_numpy_array_equal(mgr.iget(0).internal_values(), np.array([0] * 3)) tm.assert_numpy_array_equal( mgr.iget(1).internal_values(), np.array(["bar"] * 3, dtype=np.object_) @@ -354,22 +354,22 @@ def test_set(self): ) def test_set_change_dtype(self, mgr): - mgr.set("baz", np.zeros(N, dtype=bool)) + mgr.insert(len(mgr.items), "baz", np.zeros(N, dtype=bool)) - mgr.set("baz", np.repeat("foo", N)) + mgr.iset(mgr.items.get_loc("baz"), np.repeat("foo", N)) idx = mgr.items.get_loc("baz") assert mgr.iget(idx).dtype == np.object_ mgr2 = mgr.consolidate() - mgr2.set("baz", np.repeat("foo", N)) + mgr2.iset(mgr2.items.get_loc("baz"), np.repeat("foo", N)) idx = mgr2.items.get_loc("baz") assert mgr2.iget(idx).dtype == np.object_ - mgr2.set("quux", tm.randn(N).astype(int)) + mgr2.insert(len(mgr2.items), "quux", tm.randn(N).astype(int)) idx = mgr2.items.get_loc("quux") assert mgr2.iget(idx).dtype == np.int_ - mgr2.set("quux", tm.randn(N)) + mgr2.iset(mgr2.items.get_loc("quux"), tm.randn(N)) assert mgr2.iget(idx).dtype == np.float_ def test_copy(self, mgr): @@ -496,9 +496,9 @@ def _compare(old_mgr, new_mgr): # convert mgr = create_mgr("a,b,foo: object; f: i8; g: f8") - mgr.set("a", np.array(["1"] * N, dtype=np.object_)) - mgr.set("b", np.array(["2."] * N, dtype=np.object_)) - mgr.set("foo", np.array(["foo."] * N, dtype=np.object_)) + mgr.iset(0, np.array(["1"] * N, dtype=np.object_)) + mgr.iset(1, np.array(["2."] * N, dtype=np.object_)) + mgr.iset(2, 
np.array(["foo."] * N, dtype=np.object_)) new_mgr = mgr.convert(numeric=True) assert new_mgr.iget(0).dtype == np.int64 assert new_mgr.iget(1).dtype == np.float64 @@ -509,9 +509,9 @@ def _compare(old_mgr, new_mgr): mgr = create_mgr( "a,b,foo: object; f: i4; bool: bool; dt: datetime; i: i8; g: f8; h: f2" ) - mgr.set("a", np.array(["1"] * N, dtype=np.object_)) - mgr.set("b", np.array(["2."] * N, dtype=np.object_)) - mgr.set("foo", np.array(["foo."] * N, dtype=np.object_)) + mgr.iset(0, np.array(["1"] * N, dtype=np.object_)) + mgr.iset(1, np.array(["2."] * N, dtype=np.object_)) + mgr.iset(2, np.array(["foo."] * N, dtype=np.object_)) new_mgr = mgr.convert(numeric=True) assert new_mgr.iget(0).dtype == np.int64 assert new_mgr.iget(1).dtype == np.float64 @@ -599,11 +599,11 @@ def test_interleave_dtype(self, mgr_string, dtype): assert mgr.as_array().dtype == "object" def test_consolidate_ordering_issues(self, mgr): - mgr.set("f", tm.randn(N)) - mgr.set("d", tm.randn(N)) - mgr.set("b", tm.randn(N)) - mgr.set("g", tm.randn(N)) - mgr.set("h", tm.randn(N)) + mgr.iset(mgr.items.get_loc("f"), tm.randn(N)) + mgr.iset(mgr.items.get_loc("d"), tm.randn(N)) + mgr.iset(mgr.items.get_loc("b"), tm.randn(N)) + mgr.iset(mgr.items.get_loc("g"), tm.randn(N)) + mgr.iset(mgr.items.get_loc("h"), tm.randn(N)) # we have datetime/tz blocks in mgr cons = mgr.consolidate() @@ -641,7 +641,7 @@ def test_get_numeric_data(self): "str: object; bool: bool; obj: object; dt: datetime", item_shape=(3,), ) - mgr.set("obj", np.array([1, 2, 3], dtype=np.object_)) + mgr.iset(5, np.array([1, 2, 3], dtype=np.object_)) numeric = mgr.get_numeric_data() tm.assert_index_equal( @@ -653,7 +653,7 @@ def test_get_numeric_data(self): ) # Check sharing - numeric.set("float", np.array([100.0, 200.0, 300.0])) + numeric.iset(numeric.items.get_loc("float"), np.array([100.0, 200.0, 300.0])) tm.assert_almost_equal( mgr.iget(mgr.items.get_loc("float")).internal_values(), np.array([100.0, 200.0, 300.0]), @@ -663,7 +663,9 @@ def 
test_get_numeric_data(self): tm.assert_index_equal( numeric.items, pd.Index(["int", "float", "complex", "bool"]) ) - numeric2.set("float", np.array([1000.0, 2000.0, 3000.0])) + numeric2.iset( + numeric2.items.get_loc("float"), np.array([1000.0, 2000.0, 3000.0]) + ) tm.assert_almost_equal( mgr.iget(mgr.items.get_loc("float")).internal_values(), np.array([100.0, 200.0, 300.0]), @@ -675,7 +677,7 @@ def test_get_bool_data(self): "str: object; bool: bool; obj: object; dt: datetime", item_shape=(3,), ) - mgr.set("obj", np.array([True, False, True], dtype=np.object_)) + mgr.iset(6, np.array([True, False, True], dtype=np.object_)) bools = mgr.get_bool_data() tm.assert_index_equal(bools.items, pd.Index(["bool"])) @@ -684,7 +686,7 @@ def test_get_bool_data(self): bools.iget(bools.items.get_loc("bool")).internal_values(), ) - bools.set("bool", np.array([True, False, True])) + bools.iset(0, np.array([True, False, True])) tm.assert_numpy_array_equal( mgr.iget(mgr.items.get_loc("bool")).internal_values(), np.array([True, False, True]), @@ -692,7 +694,7 @@ def test_get_bool_data(self): # Check sharing bools2 = mgr.get_bool_data(copy=True) - bools2.set("bool", np.array([False, True, False])) + bools2.iset(0, np.array([False, True, False])) tm.assert_numpy_array_equal( mgr.iget(mgr.items.get_loc("bool")).internal_values(), np.array([True, False, True]),
xref #33052, #33332. This touches several DataFrame/NDFrame methods. Those methods need a refactor in part because the ones defined on NDFrame are only ever called for DataFrame. Will refactor in follow-up.
https://api.github.com/repos/pandas-dev/pandas/pulls/33347
2020-04-06T23:10:14Z
2020-04-17T21:23:53Z
2020-04-17T21:23:53Z
2020-04-17T22:12:10Z
BUG: Timestamp comparison with ndarray[dt64]
diff --git a/pandas/_libs/tslibs/c_timestamp.pyx b/pandas/_libs/tslibs/c_timestamp.pyx index 04fadf220388f..68987030e8b4e 100644 --- a/pandas/_libs/tslibs/c_timestamp.pyx +++ b/pandas/_libs/tslibs/c_timestamp.pyx @@ -114,6 +114,18 @@ cdef class _Timestamp(datetime): return NotImplemented elif is_array(other): # avoid recursion error GH#15183 + if other.dtype.kind == "M": + if self.tz is None: + return PyObject_RichCompare(self.asm8, other, op) + raise TypeError( + "Cannot compare tz-naive and tz-aware timestamps" + ) + if other.dtype.kind == "O": + # Operate element-wise + return np.array( + [PyObject_RichCompare(self, x, op) for x in other], + dtype=bool, + ) return PyObject_RichCompare(np.array([self]), other, op) return PyObject_RichCompare(other, self, reverse_ops[op]) else: diff --git a/pandas/tests/scalar/timestamp/test_comparisons.py b/pandas/tests/scalar/timestamp/test_comparisons.py index 4581e736b2ea1..27aef8c4a9eb7 100644 --- a/pandas/tests/scalar/timestamp/test_comparisons.py +++ b/pandas/tests/scalar/timestamp/test_comparisons.py @@ -5,9 +5,61 @@ import pytest from pandas import Timestamp +import pandas._testing as tm class TestTimestampComparison: + def test_comparison_dt64_ndarray(self): + ts = Timestamp.now() + ts2 = Timestamp("2019-04-05") + arr = np.array([[ts.asm8, ts2.asm8]], dtype="M8[ns]") + + result = ts == arr + expected = np.array([[True, False]], dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + result = arr == ts + tm.assert_numpy_array_equal(result, expected) + + result = ts != arr + tm.assert_numpy_array_equal(result, ~expected) + + result = arr != ts + tm.assert_numpy_array_equal(result, ~expected) + + result = ts2 < arr + tm.assert_numpy_array_equal(result, expected) + + result = arr < ts2 + tm.assert_numpy_array_equal(result, np.array([[False, False]], dtype=bool)) + + result = ts2 <= arr + tm.assert_numpy_array_equal(result, np.array([[True, True]], dtype=bool)) + + result = arr <= ts2 + 
tm.assert_numpy_array_equal(result, ~expected) + + result = ts >= arr + tm.assert_numpy_array_equal(result, np.array([[True, True]], dtype=bool)) + + result = arr >= ts + tm.assert_numpy_array_equal(result, np.array([[True, False]], dtype=bool)) + + @pytest.mark.parametrize("reverse", [True, False]) + def test_comparison_dt64_ndarray_tzaware(self, reverse, all_compare_operators): + op = getattr(operator, all_compare_operators.strip("__")) + + ts = Timestamp.now("UTC") + arr = np.array([ts.asm8, ts.asm8], dtype="M8[ns]") + + left, right = ts, arr + if reverse: + left, right = arr, ts + + msg = "Cannot compare tz-naive and tz-aware timestamps" + with pytest.raises(TypeError, match=msg): + op(left, right) + def test_comparison_object_array(self): # GH#15183 ts = Timestamp("2011-01-03 00:00:00-0500", tz="US/Eastern")
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33346
2020-04-06T23:05:39Z
2020-04-09T23:23:52Z
2020-04-09T23:23:51Z
2020-04-09T23:40:17Z
TST: add read_json test #GH32383
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index c0d40048a72fe..a379771fc60e2 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -237,6 +237,24 @@ def test_build_series(self): assert result == expected + def test_read_json_from_to_json_results(self): + # GH32383 + df = pd.DataFrame( + { + "_id": {"row_0": 0}, + "category": {"row_0": "Goods"}, + "recommender_id": {"row_0": 3}, + "recommender_name_jp": {"row_0": "浦田"}, + "recommender_name_en": {"row_0": "Urata"}, + "name_jp": {"row_0": "博多人形(松尾吉将まつお よしまさ)"}, + "name_en": {"row_0": "Hakata Dolls Matsuo"}, + } + ) + result1 = pd.read_json(df.to_json()) + result2 = pd.DataFrame.from_dict(json.loads(df.to_json())) + tm.assert_frame_equal(result1, df) + tm.assert_frame_equal(result2, df) + def test_to_json(self): df = self.df.copy() df.index.name = "idx"
- [x] closes #32383 - [x] tests added / passed - [x] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33345
2020-04-06T23:03:42Z
2020-04-12T21:50:14Z
2020-04-12T21:50:14Z
2020-04-14T20:06:20Z
TST: add read_json test for issue #32383
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 1529a259c49af..e109c7a4f1c8d 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -1,4 +1,4 @@ -from datetime import date +from datetime import date, timedelta import dateutil import numpy as np @@ -44,6 +44,45 @@ def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self): assert str(index.reindex([])[0].tz) == "US/Eastern" assert str(index.reindex(np.array([]))[0].tz) == "US/Eastern" + def test_reindex_with_same_tz(self): + # GH 32740 + rng_a = date_range("2010-01-01", "2010-01-02", periods=24, tz="utc") + rng_b = date_range("2010-01-01", "2010-01-02", periods=23, tz="utc") + result1, result2 = rng_a.reindex( + rng_b, method="nearest", tolerance=timedelta(seconds=20) + ) + expected_list1 = [ + "2010-01-01 00:00:00", + "2010-01-01 01:05:27.272727272", + "2010-01-01 02:10:54.545454545", + "2010-01-01 03:16:21.818181818", + "2010-01-01 04:21:49.090909090", + "2010-01-01 05:27:16.363636363", + "2010-01-01 06:32:43.636363636", + "2010-01-01 07:38:10.909090909", + "2010-01-01 08:43:38.181818181", + "2010-01-01 09:49:05.454545454", + "2010-01-01 10:54:32.727272727", + "2010-01-01 12:00:00", + "2010-01-01 13:05:27.272727272", + "2010-01-01 14:10:54.545454545", + "2010-01-01 15:16:21.818181818", + "2010-01-01 16:21:49.090909090", + "2010-01-01 17:27:16.363636363", + "2010-01-01 18:32:43.636363636", + "2010-01-01 19:38:10.909090909", + "2010-01-01 20:43:38.181818181", + "2010-01-01 21:49:05.454545454", + "2010-01-01 22:54:32.727272727", + "2010-01-02 00:00:00", + ] + expected1 = DatetimeIndex( + expected_list1, dtype="datetime64[ns, UTC]", freq=None, + ) + expected2 = np.array([0] + [-1] * 21 + [23], dtype=np.int64,) + tm.assert_index_equal(result1, expected1) + tm.assert_numpy_array_equal(result2, expected2) + def test_time_loc(self): # GH8667 from datetime import time from 
pandas._libs.index import _SIZE_CUTOFF diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index c0d40048a72fe..dccfc5c717522 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -237,6 +237,22 @@ def test_build_series(self): assert result == expected + def test_read_json(self): + df = pd.DataFrame( + { + "_id": {"0": "0"}, + "category": {"0": "Goods"}, + "recommender_id": {"0": "3"}, + "recommender_name_en": {"0": "Urata"}, + "name_en": {"0": "Hakata Dolls Matsuo"}, + } + ) + df_json = df.to_json() + df_dict = json.loads(df_json) + result = pd.read_json(df_json) + expected = pd.DataFrame().from_dict(df_dict) + tm.assert_frame_equal(result, expected) + def test_to_json(self): df = self.df.copy() df.index.name = "idx"
- [x] closes #32383 - [ ] tests added / passed - [x] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33344
2020-04-06T22:39:38Z
2020-04-06T22:41:25Z
null
2020-04-06T22:46:54Z
CLN: Replace first_not_none function with default argument to next
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 093c925acbc49..25c57b7847656 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1193,20 +1193,14 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): key_names = self.grouper.names - # GH12824. - def first_not_none(values): - try: - return next(com.not_none(*values)) - except StopIteration: - return None - - v = first_not_none(values) + # GH12824 + first_not_none = next(com.not_none(*values), None) - if v is None: + if first_not_none is None: # GH9684. If all values are None, then this will throw an error. # We'd prefer it return an empty dataframe. return DataFrame() - elif isinstance(v, DataFrame): + elif isinstance(first_not_none, DataFrame): return self._concat_objects(keys, values, not_indexed_same=not_indexed_same) elif self.grouper.groupings is not None: if len(self.grouper.groupings) > 1: @@ -1223,6 +1217,9 @@ def first_not_none(values): # reorder the values values = [values[i] for i in indexer] + + # update due to the potential reorder + first_not_none = next(com.not_none(*values), None) else: key_index = Index(keys, name=key_names[0]) @@ -1232,20 +1229,19 @@ def first_not_none(values): key_index = None # make Nones an empty object - v = first_not_none(values) - if v is None: + if first_not_none is None: return DataFrame() - elif isinstance(v, NDFrame): + elif isinstance(first_not_none, NDFrame): # this is to silence a DeprecationWarning # TODO: Remove when default dtype of empty Series is object - kwargs = v._construct_axes_dict() - if v._constructor is Series: + kwargs = first_not_none._construct_axes_dict() + if first_not_none._constructor is Series: backup = create_series_with_explicit_dtype( **kwargs, dtype_if_empty=object ) else: - backup = v._constructor(**kwargs) + backup = first_not_none._constructor(**kwargs) values = [x if (x is not None) else backup for x in values]
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33343
2020-04-06T22:19:09Z
2020-04-10T21:48:55Z
2020-04-10T21:48:55Z
2020-07-11T16:02:13Z
BUG: scalar indexing on 2D DTA/TDA/PA
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 4fabd8f558fee..ce42fad31ef78 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -574,6 +574,8 @@ def __getitem__(self, key): freq = self.freq result = getitem(key) + if lib.is_scalar(result): + return self._box_func(result) return self._simple_new(result, dtype=self.dtype, freq=freq) def __setitem__( diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index c23f78d845cfd..3221882f09eea 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -6,7 +6,7 @@ import numpy as np -from pandas._libs import NaT, Timestamp, algos as libalgos, lib, writers +from pandas._libs import NaT, algos as libalgos, lib, writers import pandas._libs.internals as libinternals from pandas._libs.tslibs import Timedelta, conversion from pandas._libs.tslibs.timezones import tz_compare @@ -2023,12 +2023,7 @@ def array_values(self): def iget(self, key): # GH#31649 we need to wrap scalars in Timestamp/Timedelta # TODO(EA2D): this can be removed if we ever have 2D EA - result = super().iget(key) - if isinstance(result, np.datetime64): - result = Timestamp(result) - elif isinstance(result, np.timedelta64): - result = Timedelta(result) - return result + return self.array_values().reshape(self.shape)[key] def shift(self, periods, axis=0, fill_value=None): # TODO(EA2D) this is unnecessary if these blocks are backed by 2D EAs diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index fe35344f46688..88263997d7784 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -222,6 +222,11 @@ def test_getitem_2d(self, arr1d): result = arr2d[:3, 0] tm.assert_equal(result, expected) + # Scalar lookup + result = arr2d[-1, 0] + expected = arr1d[-1] + assert result == expected + def test_setitem(self): data = np.arange(10, dtype="i8") * 24 
* 3600 * 10 ** 9 arr = self.array_cls(data, freq="D")
cc @jorisvandenbossche this fixes the issue with `DatetimeLikeBlockMixin.iget` discussed in #33252 Note: this sits on top of #33290.
https://api.github.com/repos/pandas-dev/pandas/pulls/33342
2020-04-06T21:57:35Z
2020-04-10T16:03:43Z
2020-04-10T16:03:43Z
2020-04-10T17:40:19Z
CI: Sync web and dev docs automatically with prod server
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 025b6f1813df7..1b9d28b4f2d69 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -125,32 +125,18 @@ jobs: - name: Check ipython directive errors run: "! grep -B1 \"^<<<-------------------------------------------------------------------------$\" sphinx.log" - - name: Install Rclone - run: sudo apt install rclone -y - if: github.event_name == 'push' - - - name: Set up Rclone + - name: Install ssh key run: | - CONF=$HOME/.config/rclone/rclone.conf - mkdir -p `dirname $CONF` - echo "[ovh_host]" > $CONF - echo "type = swift" >> $CONF - echo "env_auth = false" >> $CONF - echo "auth_version = 3" >> $CONF - echo "auth = https://auth.cloud.ovh.net/v3/" >> $CONF - echo "endpoint_type = public" >> $CONF - echo "tenant_domain = default" >> $CONF - echo "tenant = 2977553886518025" >> $CONF - echo "domain = default" >> $CONF - echo "user = w4KGs3pmDxpd" >> $CONF - echo "key = ${{ secrets.ovh_object_store_key }}" >> $CONF - echo "region = BHS" >> $CONF + mkdir -m 700 -p ~/.ssh + echo "${{ secrets.server_ssh_key }}" > ~/.ssh/id_rsa + chmod 600 ~/.ssh/id_rsa + echo "${{ secrets.server_ip }} ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBE1Kkopomm7FHG5enATf7SgnpICZ4W2bw+Ho+afqin+w7sMcrsa0je7sbztFAV8YchDkiBKnWTG4cRT+KZgZCaY=" > ~/.ssh/known_hosts if: github.event_name == 'push' - - name: Sync web with OVH - run: rclone sync --exclude pandas-docs/** web/build ovh_host:prod + - name: Upload web + run: rsync -az --delete --exclude='pandas-docs' --exclude='Pandas_Cheat_Sheet*' web/build/ docs@${{ secrets.server_ip }}:/usr/share/nginx/pandas if: github.event_name == 'push' - - name: Sync dev docs with OVH - run: rclone sync doc/build/html ovh_host:prod/pandas-docs/dev + - name: Upload dev docs + run: rsync -az --delete doc/build/html/ docs@${{ secrets.server_ip }}:/usr/share/nginx/pandas/pandas-docs/dev if: github.event_name == 'push'
- [X] closes #28528 This forgets about the OVH server, and after every merge to master will synchronize the repo web and docs in the production server. **Note** that this will delete any file in the server that is not known, except the cheat sheets, and all the docs versions in `pandas-doc`. I don't think anything else should be kept, but if there is anything else that we upload manually, please let me know. I made a backup in the server, so if we delete anything by accident with this, it can be easily restored. CC: @TomAugspurger @jorisvandenbossche
https://api.github.com/repos/pandas-dev/pandas/pulls/33341
2020-04-06T21:35:09Z
2020-04-07T11:57:42Z
2020-04-07T11:57:42Z
2020-04-07T11:57:43Z
REF: replace column-wise, remove BlockManager.apply filter kwarg
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index aedbba755227d..d2da52ba7bdd0 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4025,6 +4025,41 @@ def replace( method=method, ) + def _replace_columnwise( + self, mapping: Dict[Label, Tuple[Any, Any]], inplace: bool, regex + ): + """ + Dispatch to Series.replace column-wise. + + + Parameters + ---------- + mapping : dict + of the form {col: (target, value)} + inplace : bool + regex : bool or same types as `to_replace` in DataFrame.replace + + Returns + ------- + DataFrame or None + """ + # Operate column-wise + res = self if inplace else self.copy() + ax = self.columns + + for i in range(len(ax)): + if ax[i] in mapping: + ser = self.iloc[:, i] + + target, value = mapping[ax[i]] + newobj = ser.replace(target, value, regex=regex) + + res.iloc[:, i] = newobj + + if inplace: + return + return res.__finalize__(self) + @Appender(_shared_docs["shift"] % _shared_doc_kwargs) def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> "DataFrame": return super().shift( diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 052a4adddca27..a36aca5ea7f1c 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6468,7 +6468,6 @@ def replace( ): if not ( is_scalar(to_replace) - or isinstance(to_replace, pd.Series) or is_re_compilable(to_replace) or is_list_like(to_replace) ): @@ -6559,18 +6558,16 @@ def replace( # {'A': NA} -> 0 elif not is_list_like(value): - keys = [(k, src) for k, src in to_replace.items() if k in self] - keys_len = len(keys) - 1 - for i, (k, src) in enumerate(keys): - convert = i == keys_len - new_data = new_data.replace( - to_replace=src, - value=value, - filter=[k], - inplace=inplace, - regex=regex, - convert=convert, + # Operate column-wise + if self.ndim == 1: + raise ValueError( + "Series.replace cannot use dict-like to_replace " + "and non-None value" ) + mapping = { + col: (to_rep, value) for col, to_rep in to_replace.items() + } + return 
self._replace_columnwise(mapping, inplace, regex) else: raise TypeError("value argument must be scalar, dict, or Series") @@ -6611,17 +6608,14 @@ def replace( # dest iterable dict-like if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1} - new_data = self._mgr - - for k, v in value.items(): - if k in self: - new_data = new_data.replace( - to_replace=to_replace, - value=v, - filter=[k], - inplace=inplace, - regex=regex, - ) + # Operate column-wise + if self.ndim == 1: + raise ValueError( + "Series.replace cannot use dict-value and " + "non-None to_replace" + ) + mapping = {col: (to_replace, val) for col, val in value.items()} + return self._replace_columnwise(mapping, inplace, regex) elif not is_list_like(value): # NA -> 0 new_data = self._mgr.replace( diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index ba2fd037901a2..d22209e61df0c 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -679,7 +679,6 @@ def replace( to_replace, value, inplace: bool = False, - filter=None, regex: bool = False, convert: bool = True, ): @@ -711,12 +710,7 @@ def replace( # _can_hold_element checks have reduced this back to the # scalar case and we can avoid a costly object cast return self.replace( - to_replace[0], - value, - inplace=inplace, - filter=filter, - regex=regex, - convert=convert, + to_replace[0], value, inplace=inplace, regex=regex, convert=convert, ) # GH 22083, TypeError or ValueError occurred within error handling @@ -730,7 +724,6 @@ def replace( to_replace=to_replace, value=value, inplace=inplace, - filter=filter, regex=regex, convert=convert, ) @@ -743,9 +736,6 @@ def replace( to_replace = convert_scalar_for_putitemlike(to_replace, values.dtype) mask = missing.mask_missing(values, to_replace) - if filter is not None: - filtered_out = ~self.mgr_locs.isin(filter) - mask[filtered_out.nonzero()[0]] = False if not mask.any(): if inplace: @@ -774,7 +764,6 @@ def replace( to_replace=original_to_replace, 
value=value, inplace=inplace, - filter=filter, regex=regex, convert=convert, ) @@ -2374,20 +2363,13 @@ def _can_hold_element(self, element: Any) -> bool: return issubclass(tipo.type, np.bool_) return isinstance(element, (bool, np.bool_)) - def replace( - self, to_replace, value, inplace=False, filter=None, regex=False, convert=True - ): + def replace(self, to_replace, value, inplace=False, regex=False, convert=True): inplace = validate_bool_kwarg(inplace, "inplace") to_replace_values = np.atleast_1d(to_replace) if not np.can_cast(to_replace_values, bool): return self return super().replace( - to_replace, - value, - inplace=inplace, - filter=filter, - regex=regex, - convert=convert, + to_replace, value, inplace=inplace, regex=regex, convert=convert, ) @@ -2461,9 +2443,7 @@ def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"] def _can_hold_element(self, element: Any) -> bool: return True - def replace( - self, to_replace, value, inplace=False, filter=None, regex=False, convert=True - ): + def replace(self, to_replace, value, inplace=False, regex=False, convert=True): to_rep_is_list = is_list_like(to_replace) value_is_list = is_list_like(value) both_lists = to_rep_is_list and value_is_list @@ -2474,33 +2454,18 @@ def replace( if not either_list and is_re(to_replace): return self._replace_single( - to_replace, - value, - inplace=inplace, - filter=filter, - regex=True, - convert=convert, + to_replace, value, inplace=inplace, regex=True, convert=convert, ) elif not (either_list or regex): return super().replace( - to_replace, - value, - inplace=inplace, - filter=filter, - regex=regex, - convert=convert, + to_replace, value, inplace=inplace, regex=regex, convert=convert, ) elif both_lists: for to_rep, v in zip(to_replace, value): result_blocks = [] for b in blocks: result = b._replace_single( - to_rep, - v, - inplace=inplace, - filter=filter, - regex=regex, - convert=convert, + to_rep, v, inplace=inplace, regex=regex, convert=convert, ) 
result_blocks = _extend_blocks(result, result_blocks) blocks = result_blocks @@ -2511,35 +2476,18 @@ def replace( result_blocks = [] for b in blocks: result = b._replace_single( - to_rep, - value, - inplace=inplace, - filter=filter, - regex=regex, - convert=convert, + to_rep, value, inplace=inplace, regex=regex, convert=convert, ) result_blocks = _extend_blocks(result, result_blocks) blocks = result_blocks return result_blocks return self._replace_single( - to_replace, - value, - inplace=inplace, - filter=filter, - convert=convert, - regex=regex, + to_replace, value, inplace=inplace, convert=convert, regex=regex, ) def _replace_single( - self, - to_replace, - value, - inplace=False, - filter=None, - regex=False, - convert=True, - mask=None, + self, to_replace, value, inplace=False, regex=False, convert=True, mask=None, ): """ Replace elements by the given value. @@ -2552,7 +2500,6 @@ def _replace_single( Replacement object. inplace : bool, default False Perform inplace modification. - filter : list, optional regex : bool, default False If true, perform regular expression substitution. 
convert : bool, default True @@ -2598,9 +2545,7 @@ def _replace_single( else: # if the thing to replace is not a string or compiled regex call # the superclass method -> to_replace is some kind of object - return super().replace( - to_replace, value, inplace=inplace, filter=filter, regex=regex - ) + return super().replace(to_replace, value, inplace=inplace, regex=regex) new_values = self.values if inplace else self.values.copy() @@ -2625,15 +2570,10 @@ def re_replacer(s): f = np.vectorize(re_replacer, otypes=[self.dtype]) - if filter is None: - filt = slice(None) - else: - filt = self.mgr_locs.isin(filter).nonzero()[0] - if mask is None: - new_values[filt] = f(new_values[filt]) + new_values[:] = f(new_values) else: - new_values[filt][mask] = f(new_values[filt][mask]) + new_values[mask] = f(new_values[mask]) # convert block = self.make_block(new_values) @@ -2730,7 +2670,6 @@ def replace( to_replace, value, inplace: bool = False, - filter=None, regex: bool = False, convert: bool = True, ): diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index e2c9212ae9576..f241410b25a82 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -369,7 +369,7 @@ def reduce(self, func, *args, **kwargs): return res - def apply(self: T, f, filter=None, align_keys=None, **kwargs) -> T: + def apply(self: T, f, align_keys=None, **kwargs) -> T: """ Iterate over the blocks, collect and create a new BlockManager. @@ -377,26 +377,17 @@ def apply(self: T, f, filter=None, align_keys=None, **kwargs) -> T: ---------- f : str or callable Name of the Block method to apply. 
- filter : list, if supplied, only call the block if the filter is in - the block Returns ------- BlockManager """ + assert "filter" not in kwargs + align_keys = align_keys or [] - result_blocks = [] + result_blocks: List[Block] = [] # fillna: Series/DataFrame is responsible for making sure value is aligned - # filter kwarg is used in replace-* family of methods - if filter is not None: - filter_locs = set(self.items.get_indexer_for(filter)) - if len(filter_locs) == len(self.items): - # All items are included, as if there were no filtering - filter = None - else: - kwargs["filter"] = filter_locs - self._consolidate_inplace() align_copy = False @@ -410,10 +401,6 @@ def apply(self: T, f, filter=None, align_keys=None, **kwargs) -> T: } for b in self.blocks: - if filter is not None: - if not b.mgr_locs.isin(filter_locs).any(): - result_blocks.append(b) - continue if aligned_args: b_items = self.items[b.mgr_locs.indexer] diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py index 904a455870ab1..bea8cb8b105e7 100644 --- a/pandas/tests/series/methods/test_replace.py +++ b/pandas/tests/series/methods/test_replace.py @@ -373,3 +373,19 @@ def test_replace_invalid_to_replace(self): ) with pytest.raises(TypeError, match=msg): series.replace(lambda x: x.strip()) + + def test_replace_only_one_dictlike_arg(self): + # GH#33340 + + ser = pd.Series([1, 2, "A", pd.Timestamp.now(), True]) + to_replace = {0: 1, 2: "A"} + value = "foo" + msg = "Series.replace cannot use dict-like to_replace and non-None value" + with pytest.raises(ValueError, match=msg): + ser.replace(to_replace, value) + + to_replace = 1 + value = {0: "foo", 2: "bar"} + msg = "Series.replace cannot use dict-value and non-None to_replace" + with pytest.raises(ValueError, match=msg): + ser.replace(to_replace, value)
Follow-up to #33279.
https://api.github.com/repos/pandas-dev/pandas/pulls/33340
2020-04-06T21:27:05Z
2020-04-08T17:29:28Z
2020-04-08T17:29:28Z
2020-04-08T17:33:31Z
BUG: Don't raise on value_counts for empty Int64
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 7cb7db27ae603..6ec02a108a879 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -461,7 +461,7 @@ Sparse ExtensionArray ^^^^^^^^^^^^^^ -- +- Fixed bug where :meth:`Serires.value_counts` would raise on empty input of ``Int64`` dtype (:issue:`33317`) - diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index 4f3c68aa03b16..f5189068d5da1 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -499,7 +499,8 @@ def _values_for_argsort(self) -> np.ndarray: ExtensionArray.argsort """ data = self._data.copy() - data[self._mask] = data.min() - 1 + if self._mask.any(): + data[self._mask] = data.min() - 1 return data @classmethod diff --git a/pandas/tests/arrays/integer/test_function.py b/pandas/tests/arrays/integer/test_function.py index 58913189593a9..bdf902d1aca62 100644 --- a/pandas/tests/arrays/integer/test_function.py +++ b/pandas/tests/arrays/integer/test_function.py @@ -103,6 +103,16 @@ def test_value_counts_na(): tm.assert_series_equal(result, expected) +def test_value_counts_empty(): + # https://github.com/pandas-dev/pandas/issues/33317 + s = pd.Series([], dtype="Int64") + result = s.value_counts() + # TODO: The dtype of the index seems wrong (it's int64 for non-empty) + idx = pd.Index([], dtype="object") + expected = pd.Series([], index=idx, dtype="Int64") + tm.assert_series_equal(result, expected) + + # TODO(jreback) - these need testing / are broken # shift
- [ ] closes #33317 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33339
2020-04-06T21:02:15Z
2020-04-06T21:58:59Z
2020-04-06T21:58:59Z
2020-04-06T22:17:45Z
DOC/PLT: Add `stacked` in doc and doc example for barh and bar plot
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index e466a215091ea..b3c4d3138e915 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -985,6 +985,13 @@ def line(self, x=None, y=None, **kwargs): ... 'lifespan': lifespan}, index=index) >>> ax = df.plot.bar(rot=0) + Plot stacked bar charts for the DataFrame + + .. plot:: + :context: close-figs + + >>> ax = df.plot.bar(stacked=True) + Instead of nesting, the figure can be split by column with ``subplots=True``. In this case, a :class:`numpy.ndarray` of :class:`matplotlib.axes.Axes` are returned. @@ -1066,6 +1073,13 @@ def bar(self, x=None, y=None, **kwargs): ... 'lifespan': lifespan}, index=index) >>> ax = df.plot.barh() + Plot stacked barh charts for the DataFrame + + .. plot:: + :context: close-figs + + >>> ax = df.plot.barh(stacked=True) + We can specify colors for each column .. plot::
- [x] closes #32759 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33337
2020-04-06T19:32:42Z
2020-05-06T21:30:51Z
2020-05-06T21:30:51Z
2020-05-06T21:31:00Z
PLT: Order of plots does not preserve the column orders in df.hist
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index aa02c3bb9a1f8..7cde357562af4 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -686,6 +686,7 @@ Plotting - :func:`.plot` for line/bar now accepts color by dictonary (:issue:`8193`). - Bug in :meth:`DataFrame.plot.hist` where weights are not working for multiple columns (:issue:`33173`) - Bug in :meth:`DataFrame.boxplot` and :meth:`DataFrame.plot.boxplot` lost color attributes of ``medianprops``, ``whiskerprops``, ``capprops`` and ``medianprops`` (:issue:`30346`) +- Bug in :meth:`DataFrame.hist` where the order of ``column`` argument was ignored (:issue:`29235`) - Bug in :meth:`DataFrame.plot.scatter` that when adding multiple plots with different ``cmap``, colorbars alway use the first ``cmap`` (:issue:`33389`) diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py index 3a0cdc90dfd5c..b0ce43dc2eb36 100644 --- a/pandas/plotting/_matplotlib/hist.py +++ b/pandas/plotting/_matplotlib/hist.py @@ -4,8 +4,6 @@ from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass from pandas.core.dtypes.missing import isna, remove_na_arraylike -import pandas.core.common as com - from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.core import LinePlot, MPLPlot from pandas.plotting._matplotlib.tools import _flatten, _set_ticks_props, _subplots @@ -403,7 +401,7 @@ def hist_frame( ) _axes = _flatten(axes) - for i, col in enumerate(com.try_sort(data.columns)): + for i, col in enumerate(data.columns): ax = _axes[i] ax.hist(data[col].dropna().values, bins=bins, **kwds) ax.set_title(col) diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py index fba4f07f6cc0f..5a30e9fbb91c6 100644 --- a/pandas/tests/plotting/test_hist_method.py +++ b/pandas/tests/plotting/test_hist_method.py @@ -269,6 +269,30 @@ def test_hist_subplot_xrot(self): ) 
self._check_ticks_props(axes, xrot=0) + @pytest.mark.parametrize( + "column, expected", + [ + (None, ["width", "length", "height"]), + (["length", "width", "height"], ["length", "width", "height"]), + ], + ) + def test_hist_column_order_unchanged(self, column, expected): + # GH29235 + + df = DataFrame( + { + "width": [0.7, 0.2, 0.15, 0.2, 1.1], + "length": [1.5, 0.5, 1.2, 0.9, 3], + "height": [3, 0.5, 3.4, 2, 1], + }, + index=["pig", "rabbit", "duck", "chicken", "horse"], + ) + + axes = _check_plot_works(df.hist, column=column, layout=(1, 3)) + result = [axes[0, i].get_title() for i in range(3)] + + assert result == expected + @td.skip_if_no_mpl class TestDataFrameGroupByPlots(TestPlotBase):
- [x] closes #29235 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33336
2020-04-06T19:00:09Z
2020-05-05T00:50:16Z
2020-05-05T00:50:16Z
2020-05-19T08:12:15Z
BLD: Increase minimum version of Cython to 0.29.16
diff --git a/ci/deps/azure-36-32bit.yaml b/ci/deps/azure-36-32bit.yaml index cf3fca307481f..15704cf0d5427 100644 --- a/ci/deps/azure-36-32bit.yaml +++ b/ci/deps/azure-36-32bit.yaml @@ -22,5 +22,5 @@ dependencies: # see comment above - pip - pip: - - cython>=0.29.13 + - cython>=0.29.16 - pytest>=5.0.1 diff --git a/ci/deps/azure-36-locale.yaml b/ci/deps/azure-36-locale.yaml index 810554632a507..56da56b45b702 100644 --- a/ci/deps/azure-36-locale.yaml +++ b/ci/deps/azure-36-locale.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.6.* # tools - - cython>=0.29.13 + - cython>=0.29.16 - pytest>=5.0.1 - pytest-xdist>=1.21 - pytest-asyncio diff --git a/ci/deps/azure-36-locale_slow.yaml b/ci/deps/azure-36-locale_slow.yaml index 48ac50c001715..c086b3651afc3 100644 --- a/ci/deps/azure-36-locale_slow.yaml +++ b/ci/deps/azure-36-locale_slow.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.6.* # tools - - cython>=0.29.13 + - cython>=0.29.16 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/azure-36-minimum_versions.yaml b/ci/deps/azure-36-minimum_versions.yaml index de7e011d9c7ca..0e0ebe5c75218 100644 --- a/ci/deps/azure-36-minimum_versions.yaml +++ b/ci/deps/azure-36-minimum_versions.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.6.1 # tools - - cython=0.29.13 + - cython=0.29.16 - pytest=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml index dc51597a33209..31155ac93931a 100644 --- a/ci/deps/azure-37-locale.yaml +++ b/ci/deps/azure-37-locale.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.13 + - cython>=0.29.16 - pytest>=5.0.1 - pytest-xdist>=1.21 - pytest-asyncio diff --git a/ci/deps/azure-37-numpydev.yaml b/ci/deps/azure-37-numpydev.yaml index a04bdc2448bce..29ebfe2639e32 100644 --- a/ci/deps/azure-37-numpydev.yaml +++ b/ci/deps/azure-37-numpydev.yaml @@ -5,7 +5,6 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.13 - pytest>=5.0.1 - pytest-xdist>=1.21 - 
hypothesis>=3.58.0 @@ -15,6 +14,7 @@ dependencies: - pytz - pip - pip: + - cython>=0.29.16 - "git+git://github.com/dateutil/dateutil.git" - "-f https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" - "--pre" diff --git a/ci/deps/azure-macos-36.yaml b/ci/deps/azure-macos-36.yaml index 90980133b31c1..279f44b06bd02 100644 --- a/ci/deps/azure-macos-36.yaml +++ b/ci/deps/azure-macos-36.yaml @@ -5,7 +5,6 @@ dependencies: - python=3.6.* # tools - - cython>=0.29.13 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 @@ -32,5 +31,6 @@ dependencies: - xlwt - pip - pip: + - cython>=0.29.16 - pyreadstat - pyxlsb diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml index 663c55492e69e..548660cabaa67 100644 --- a/ci/deps/azure-windows-36.yaml +++ b/ci/deps/azure-windows-36.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.6.* # tools - - cython>=0.29.13 + - cython>=0.29.16 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml index 6b3ad6f560292..e491fd57b240b 100644 --- a/ci/deps/azure-windows-37.yaml +++ b/ci/deps/azure-windows-37.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.13 + - cython>=0.29.16 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml index 6883301a63a9b..2968c8f188d49 100644 --- a/ci/deps/travis-36-cov.yaml +++ b/ci/deps/travis-36-cov.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.6.* # tools - - cython>=0.29.13 + - cython>=0.29.16 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 @@ -15,7 +15,7 @@ dependencies: # pandas dependencies - beautifulsoup4 - botocore>=1.11 - - cython>=0.29.13 + - cython>=0.29.16 - dask - fastparquet>=0.3.2 - gcsfs diff --git a/ci/deps/travis-36-locale.yaml b/ci/deps/travis-36-locale.yaml index d0bc046575953..3fc19f1bca084 100644 --- a/ci/deps/travis-36-locale.yaml +++ 
b/ci/deps/travis-36-locale.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.6.* # tools - - cython>=0.29.13 + - cython>=0.29.16 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-36-slow.yaml b/ci/deps/travis-36-slow.yaml index 1dfd90d0904ac..df693f0e22c71 100644 --- a/ci/deps/travis-36-slow.yaml +++ b/ci/deps/travis-36-slow.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.6.* # tools - - cython>=0.29.13 + - cython>=0.29.16 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml index 682b1016ff3a2..986728d0a4a40 100644 --- a/ci/deps/travis-37.yaml +++ b/ci/deps/travis-37.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.13 + - cython>=0.29.16 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-38.yaml b/ci/deps/travis-38.yaml index a627b7edc175f..b879c0f81dab2 100644 --- a/ci/deps/travis-38.yaml +++ b/ci/deps/travis-38.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.8.* # tools - - cython>=0.29.13 + - cython>=0.29.16 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 7cb7db27ae603..d45e4d2f6ca0e 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -92,6 +92,11 @@ Other enhancements .. --------------------------------------------------------------------------- +Development Changes +^^^^^^^^^^^^^^^^^^^ + +- The minimum version of Cython is now the most recent bug-fix version (0.29.16) (:issue:`33334`). + .. 
_whatsnew_110.api.other: Other API changes diff --git a/environment.yml b/environment.yml index cf579738f6fe9..b80a004e6cb99 100644 --- a/environment.yml +++ b/environment.yml @@ -12,7 +12,7 @@ dependencies: - asv # building - - cython>=0.29.13 + - cython>=0.29.16 # code checks - black=19.10b0 diff --git a/pyproject.toml b/pyproject.toml index 28d7c3d55c919..696785599d7da 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ requires = [ "setuptools", "wheel", - "Cython>=0.29.13", # Note: sync with setup.py + "Cython>=0.29.16", # Note: sync with setup.py "numpy==1.13.3; python_version=='3.6' and platform_system!='AIX'", "numpy==1.14.5; python_version>='3.7' and platform_system!='AIX'", "numpy==1.16.0; python_version=='3.6' and platform_system=='AIX'", diff --git a/requirements-dev.txt b/requirements-dev.txt index 6a2cc7b53615e..4fda019987469 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -5,7 +5,7 @@ numpy>=1.15 python-dateutil>=2.6.1 pytz asv -cython>=0.29.13 +cython>=0.29.16 black==19.10b0 cpplint flake8 diff --git a/setup.py b/setup.py index 461ef005c3df3..089baae123d2a 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ def is_platform_mac(): min_numpy_ver = "1.13.3" -min_cython_ver = "0.29.13" # note: sync with pyproject.toml +min_cython_ver = "0.29.16" # note: sync with pyproject.toml try: import Cython
- In particular, this fixes a bug in code returning ctuples - cython/cython#2745 - cython/cython#1427 - This is a prereq for #33220 - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33334
2020-04-06T18:26:27Z
2020-04-07T23:24:23Z
2020-04-07T23:24:22Z
2020-04-08T04:52:18Z
REF: BlockManager.delete -> idelete
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index fac4ca6768ece..3363d22686f96 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3713,7 +3713,8 @@ def __delitem__(self, key) -> None: # If the above loop ran and didn't delete anything because # there was no match, this call should raise the appropriate # exception: - self._mgr.delete(key) + loc = self.axes[-1].get_loc(key) + self._mgr.idelete(loc) # delete from the caches try: diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index c6efd6a2ac6a7..b2c43be21771e 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1007,12 +1007,10 @@ def iget(self, i: int) -> "SingleBlockManager": self.axes[1], ) - def delete(self, item): + def idelete(self, indexer): """ - Delete selected item (items if non-unique) in-place. + Delete selected locations in-place (new block and array, same BlockManager) """ - indexer = self.items.get_loc(item) - is_deleted = np.zeros(self.shape[0], dtype=np.bool_) is_deleted[indexer] = True ref_loc_offset = -is_deleted.cumsum() @@ -1606,15 +1604,14 @@ def _consolidate_check(self): def _consolidate_inplace(self): pass - def delete(self, item): + def idelete(self, indexer): """ - Delete single item from SingleBlockManager. + Delete single location from SingleBlockManager. Ensures that self.blocks doesn't become empty. """ - loc = self.items.get_loc(item) - self._block.delete(loc) - self.axes[0] = self.axes[0].delete(loc) + self._block.delete(indexer) + self.axes[0] = self.axes[0].delete(indexer) def fast_xs(self, loc): """
Moving towards all-locational inside BlockManager, also makes it easier to grep for where methods are used.
https://api.github.com/repos/pandas-dev/pandas/pulls/33332
2020-04-06T17:49:35Z
2020-04-06T21:22:21Z
2020-04-06T21:22:21Z
2020-04-06T21:28:06Z
CLN: Static types in `pandas/_lib/lib.pyx`
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 276c2d5198831..6147d6d9c1658 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -1001,34 +1001,34 @@ cdef inline bint c_is_list_like(object obj, bint allow_sets): _TYPE_MAP = { - 'categorical': 'categorical', - 'category': 'categorical', - 'int8': 'integer', - 'int16': 'integer', - 'int32': 'integer', - 'int64': 'integer', - 'i': 'integer', - 'uint8': 'integer', - 'uint16': 'integer', - 'uint32': 'integer', - 'uint64': 'integer', - 'u': 'integer', - 'float32': 'floating', - 'float64': 'floating', - 'f': 'floating', - 'complex64': 'complex', - 'complex128': 'complex', - 'c': 'complex', - 'string': 'string', - 'S': 'bytes', - 'U': 'string', - 'bool': 'boolean', - 'b': 'boolean', - 'datetime64[ns]': 'datetime64', - 'M': 'datetime64', - 'timedelta64[ns]': 'timedelta64', - 'm': 'timedelta64', - 'interval': 'interval', + "categorical": "categorical", + "category": "categorical", + "int8": "integer", + "int16": "integer", + "int32": "integer", + "int64": "integer", + "i": "integer", + "uint8": "integer", + "uint16": "integer", + "uint32": "integer", + "uint64": "integer", + "u": "integer", + "float32": "floating", + "float64": "floating", + "f": "floating", + "complex64": "complex", + "complex128": "complex", + "c": "complex", + "string": "string", + "S": "bytes", + "U": "string", + "bool": "boolean", + "b": "boolean", + "datetime64[ns]": "datetime64", + "M": "datetime64", + "timedelta64[ns]": "timedelta64", + "m": "timedelta64", + "interval": "interval", } # types only exist on certain platform @@ -1173,12 +1173,13 @@ cdef class Seen: or self.nat_) -cdef _try_infer_map(v): +cdef object _try_infer_map(object v): """ If its in our map, just return the dtype. """ cdef: - object attr, val + object val + str attr for attr in ['name', 'kind', 'base']: val = getattr(v.dtype, attr) if val in _TYPE_MAP:
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33329
2020-04-06T16:23:39Z
2020-04-10T16:14:06Z
2020-04-10T16:14:06Z
2020-04-10T16:16:25Z
DOC: Fix examples in `pandas/core/strings.py`
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index cd9e4384fd0d9..1bdbbb54a0aac 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -296,6 +296,10 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then pytest -q --doctest-modules pandas/core/series.py RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Doctests strings.py' ; echo $MSG + pytest -q --doctest-modules pandas/core/strings.py + RET=$(($RET + $?)) ; echo $MSG "DONE" + # Directories MSG='Doctests arrays'; echo $MSG diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 59b8b37f72695..52d9a81489db4 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -652,9 +652,9 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True): To get the idea: >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr) - 0 <_sre.SRE_Match object; span=(0, 1), match='f'>oo - 1 <_sre.SRE_Match object; span=(0, 1), match='f'>uz - 2 NaN + 0 <re.Match object; span=(0, 1), match='f'>oo + 1 <re.Match object; span=(0, 1), match='f'>uz + 2 NaN dtype: object Reverse every lowercase alphabetic word: @@ -2076,8 +2076,18 @@ class StringMethods(NoNewAttributesMixin): Examples -------- - >>> s.str.split('_') - >>> s.str.replace('_', '') + >>> s = pd.Series(["A_Str_Series"]) + >>> s + 0 A_Str_Series + dtype: object + + >>> s.str.split("_") + 0 [A, Str, Series] + dtype: object + + >>> s.str.replace("_", "") + 0 AStrSeries + dtype: object """ def __init__(self, data): @@ -2583,9 +2593,14 @@ def cat(self, others=None, sep=None, na_rep=None, join="left"): Examples -------- - >>> s = pd.Series(["this is a regular sentence", - ... "https://docs.python.org/3/tutorial/index.html", - ... np.nan]) + >>> s = pd.Series( + ... [ + ... "this is a regular sentence", + ... "https://docs.python.org/3/tutorial/index.html", + ... np.nan + ... ] + ... 
) + >>> s 0 this is a regular sentence 1 https://docs.python.org/3/tutorial/index.html 2 NaN @@ -2625,7 +2640,7 @@ def cat(self, others=None, sep=None, na_rep=None, join="left"): The `pat` parameter can be used to split by other characters. - >>> s.str.split(pat = "/") + >>> s.str.split(pat="/") 0 [this is a regular sentence] 1 [https:, , docs.python.org, 3, tutorial, index... 2 NaN @@ -2636,14 +2651,10 @@ def cat(self, others=None, sep=None, na_rep=None, join="left"): the columns during the split. >>> s.str.split(expand=True) - 0 1 2 3 - 0 this is a regular - 1 https://docs.python.org/3/tutorial/index.html None None None - 2 NaN NaN NaN NaN \ - 4 - 0 sentence - 1 None - 2 NaN + 0 1 2 3 4 + 0 this is a regular sentence + 1 https://docs.python.org/3/tutorial/index.html None None None None + 2 NaN NaN NaN NaN NaN For slightly more complex use cases like splitting the html document name from a url, a combination of parameter settings can be used. @@ -2658,7 +2669,9 @@ def cat(self, others=None, sep=None, na_rep=None, join="left"): expressions. >>> s = pd.Series(["1+1=2"]) - + >>> s + 0 1+1=2 + dtype: object >>> s.str.split(r"\+|=", expand=True) 0 1 2 0 1 1 2 @@ -2750,7 +2763,7 @@ def rsplit(self, pat=None, n=-1, expand=False): >>> idx.str.partition() MultiIndex([('X', ' ', '123'), ('Y', ' ', '999')], - dtype='object') + ) Or an index with tuples with ``expand=False``:
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33328
2020-04-06T16:08:05Z
2020-04-06T20:42:01Z
2020-04-06T20:42:01Z
2020-04-07T10:49:02Z
CI: Add argument doc/source/development to formatting docstrings in code_checks.sh
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index cd9e4384fd0d9..117d21cfa342c 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -344,7 +344,7 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Validate correct capitalization among titles in documentation' ; echo $MSG - $BASE_DIR/scripts/validate_rst_title_capitalization.py $BASE_DIR/doc/source/development/contributing.rst $BASE_DIR/doc/source/reference + $BASE_DIR/scripts/validate_rst_title_capitalization.py $BASE_DIR/doc/source/development $BASE_DIR/doc/source/reference RET=$(($RET + $?)) ; echo $MSG "DONE" fi diff --git a/scripts/validate_rst_title_capitalization.py b/scripts/validate_rst_title_capitalization.py index 3d19e37ac7a1d..fdf824a2918ec 100755 --- a/scripts/validate_rst_title_capitalization.py +++ b/scripts/validate_rst_title_capitalization.py @@ -130,7 +130,24 @@ def correct_title_capitalization(title: str) -> str: # Strip all non-word characters from the beginning of the title to the # first word character. - correct_title: str = re.sub(r"^\W*", "", title).capitalize() + correct_title: str = re.sub(r"^\W*", "", title) + + # Take into consideration words with multiple capital letters + # Such as DataFrame or PeriodIndex or IO to not lower them. + # Lower the other words + if re.search(r"((?:[A-Z]\w*){2,})", correct_title): + list_words: List[str] = correct_title.split(" ") + if correct_title[0].islower(): + list_words[0].replace(correct_title[0], correct_title[0].upper()) + + for idx in range(1, len(list_words)): + if not re.search(r"((?:[A-Z]\w*){2,})", list_words[idx]): + list_words[idx] = list_words[idx].lower() + + correct_title = " ".join(list_words) + + else: + correct_title = correct_title.capitalize() # Remove a URL from the title. We do this because words in a URL must # stay lowercase, even if they are a capitalization exception.
Regarding issue #32550
https://api.github.com/repos/pandas-dev/pandas/pulls/33325
2020-04-06T14:45:07Z
2020-04-07T00:59:55Z
null
2020-04-07T00:59:55Z
PERF: fix placement when slicing a Series
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index ac8de977b9a1a..c6efd6a2ac6a7 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1568,7 +1568,7 @@ def get_slice(self, slobj: slice, axis: int = 0) -> "SingleBlockManager": blk = self._block array = blk._slice(slobj) - block = blk.make_block_same_class(array, placement=range(len(array))) + block = blk.make_block_same_class(array, placement=slice(0, len(array))) return type(self)(block, self.index[slobj]) @property
Closes https://github.com/pandas-dev/pandas/issues/33323
https://api.github.com/repos/pandas-dev/pandas/pulls/33324
2020-04-06T14:22:55Z
2020-04-06T16:11:08Z
2020-04-06T16:11:08Z
2020-04-06T16:11:08Z
TST: Don't use 'is' on strings to avoid SyntaxWarning
diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index 961c18749f055..b28e8a5b347aa 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -234,9 +234,16 @@ def test_set_index_pass_arrays_duplicate( # need to adapt first drop for case that both keys are 'A' -- # cannot drop the same column twice; - # use "is" because == would give ambiguous Boolean error for containers + # plain == would give ambiguous Boolean error for containers first_drop = ( - False if (keys[0] is "A" and keys[1] is "A") else drop # noqa: F632 + False + if ( + isinstance(keys[0], str) + and keys[0] == "A" + and isinstance(keys[1], str) + and keys[1] == "A" + ) + else drop ) # to test against already-tested behaviour, we add sequentially, # hence second append always True; must wrap keys in list, otherwise
This avoids the below warning (in Python 3.8, [user-visible in Debian](https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=956021) because they byte-compile everything on install), and possibly unspecified behaviour (though I haven't seen that in practice). ``` /usr/lib/python3/dist-packages/pandas/tests/frame/test_alter_axes.py:241: SyntaxWarning: "is" with a literal. Did you mean "=="? False if (keys[0] is "A" and keys[1] is "A") else drop # noqa: F632 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/33322
2020-04-06T14:13:00Z
2020-04-06T21:34:55Z
2020-04-06T21:34:55Z
2020-04-06T21:35:00Z
DOC/CLN: Fix docstring typo
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 3aa8a1e93355d..d0319e9181bad 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4232,7 +4232,7 @@ def equals(self, other: Any) -> bool: >>> idx1.equals(idx2) False - The oreder is compared + The order is compared >>> ascending_idx = pd.Index([1, 2, 3]) >>> ascending_idx
https://api.github.com/repos/pandas-dev/pandas/pulls/33320
2020-04-06T12:54:06Z
2020-04-06T13:05:16Z
2020-04-06T13:05:16Z
2020-04-06T16:13:13Z
Changed files permissions to be the same
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py old mode 100755 new mode 100644 diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py old mode 100755 new mode 100644 diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py old mode 100755 new mode 100644
Not 100% sure about this change, feel free to close at anytime.
https://api.github.com/repos/pandas-dev/pandas/pulls/33318
2020-04-06T12:22:30Z
2020-04-06T14:49:14Z
2020-04-06T14:49:14Z
2020-04-06T15:05:44Z
CLN: Added static types for `pandas/_libs/reduction.pyx`
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx index 9f8579606014a..4a9c89848a9d8 100644 --- a/pandas/_libs/reduction.pyx +++ b/pandas/_libs/reduction.pyx @@ -36,7 +36,12 @@ cdef class Reducer: object dummy, f, labels, typ, ityp, index ndarray arr - def __init__(self, ndarray arr, object f, axis=1, dummy=None, labels=None): + def __init__( + self, ndarray arr, object f, int axis=1, object dummy=None, object labels=None + ): + cdef: + Py_ssize_t n, k + n, k = (<object>arr).shape if axis == 0: @@ -60,7 +65,7 @@ cdef class Reducer: self.dummy, self.typ, self.index, self.ityp = self._check_dummy( dummy=dummy) - cdef _check_dummy(self, dummy=None): + cdef _check_dummy(self, object dummy=None): cdef: object index = None, typ = None, ityp = None @@ -147,7 +152,7 @@ cdef class Reducer: cdef class _BaseGrouper: - cdef _check_dummy(self, dummy): + cdef _check_dummy(self, object dummy): # both values and index must be an ndarray! values = dummy.values @@ -190,13 +195,16 @@ cdef class _BaseGrouper: """ Call self.f on our new group, then update to the next group. """ + cdef: + object res + cached_ityp._engine.clear_mapping() res = self.f(cached_typ) res = _extract_result(res) if not initialized: # On the first pass, we check the output shape to see # if this looks like a reduction. - initialized = 1 + initialized = True _check_result_array(res, len(self.dummy_arr)) islider.advance(group_size) @@ -534,7 +542,11 @@ cdef class BlockSlider: cdef: char **base_ptrs - def __init__(self, frame): + def __init__(self, object frame): + cdef: + Py_ssize_t i + object b + self.frame = frame self.dummy = frame[:0] self.index = self.dummy.index
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33316
2020-04-06T10:26:24Z
2020-04-06T14:48:12Z
2020-04-06T14:48:11Z
2020-04-06T15:07:07Z
DOC: do not include type hints in signature in html docs
diff --git a/doc/source/conf.py b/doc/source/conf.py index 35833627f6c05..d24483abd28e1 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -109,6 +109,7 @@ ) ) autosummary_generate = True if pattern is None else ["index"] +autodoc_typehints = "none" # numpydoc numpydoc_attributes_as_param_list = False
See https://github.com/pandas-dev/pandas/issues/33025. Removing the type hints in the online docs until we have a better solution to improve readability.
https://api.github.com/repos/pandas-dev/pandas/pulls/33312
2020-04-06T08:11:52Z
2020-04-06T20:52:53Z
2020-04-06T20:52:53Z
2020-04-06T20:55:44Z
Update citation webpage
diff --git a/README.md b/README.md index 5342eda4390eb..8c3617df2e8ad 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,7 @@ # pandas: powerful Python data analysis toolkit [![PyPI Latest Release](https://img.shields.io/pypi/v/pandas.svg)](https://pypi.org/project/pandas/) [![Conda Latest Release](https://anaconda.org/conda-forge/pandas/badges/version.svg)](https://anaconda.org/anaconda/pandas/) +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3509134.svg)](https://doi.org/10.5281/zenodo.3509134) [![Package Status](https://img.shields.io/pypi/status/pandas.svg)](https://pypi.org/project/pandas/) [![License](https://img.shields.io/pypi/l/pandas.svg)](https://github.com/pandas-dev/pandas/blob/master/LICENSE) [![Travis Build Status](https://travis-ci.org/pandas-dev/pandas.svg?branch=master)](https://travis-ci.org/pandas-dev/pandas) diff --git a/web/pandas/about/citing.md b/web/pandas/about/citing.md index d5cb64e58f0ad..25d2c86061daa 100644 --- a/web/pandas/about/citing.md +++ b/web/pandas/about/citing.md @@ -2,31 +2,35 @@ ## Citing pandas -If you use _pandas_ for a scientific publication, we would appreciate citations to one of the following papers: +If you use _pandas_ for a scientific publication, we would appreciate citations to the published software and the +following paper: + +- [pandas on Zenodo](https://zenodo.org/record/3715232#.XoqFyC2ZOL8), + Please find us on Zenodo and replace with the citation for the version you are using. You cna replace the full author + list from there with "The pandas development team" like in the example below. 
+ + @software{reback2020pandas, + author = {The pandas development team}, + title = {pandas-dev/pandas: Pandas}, + month = feb, + year = 2020, + publisher = {Zenodo}, + version = {latest}, + doi = {10.5281/zenodo.3509134}, + url = {https://doi.org/10.5281/zenodo.3509134} + } - [Data structures for statistical computing in python](https://conference.scipy.org/proceedings/scipy2010/pdfs/mckinney.pdf), McKinney, Proceedings of the 9th Python in Science Conference, Volume 445, 2010. - @inproceedings{mckinney2010data, - title={Data structures for statistical computing in python}, - author={Wes McKinney}, - booktitle={Proceedings of the 9th Python in Science Conference}, - volume={445}, - pages={51--56}, - year={2010}, - organization={Austin, TX} - } - - -- [pandas: a foundational Python library for data analysis and statistics](https://www.scribd.com/document/71048089/pandas-a-Foundational-Python-Library-for-Data-Analysis-and-Statistics), - McKinney, Python for High Performance and Scientific Computing, Volume 14, 2011. - - @article{mckinney2011pandas, - title={pandas: a foundational Python library for data analysis and statistics}, - author={Wes McKinney}, - journal={Python for High Performance and Scientific Computing}, - volume={14}, - year={2011} + @InProceedings{ mckinney-proc-scipy-2010, + author = { {W}es {M}c{K}inney }, + title = { {D}ata {S}tructures for {S}tatistical {C}omputing in {P}ython }, + booktitle = { {P}roceedings of the 9th {P}ython in {S}cience {C}onference }, + pages = { 56 - 61 }, + year = { 2010 }, + editor = { {S}t\'efan van der {W}alt and {J}arrod {M}illman }, + doi = { 10.25080/Majora-92bf1922-00a } } ## Brand and logo
Follow-up of #32388, addressing #24036 I will leave it to the pandas team to decide whether to put in there a BibTeX entry with the concept DOI or a specific version, some options of dealing with this are described [in this comment](https://github.com/sherpa/sherpa/pull/634#issuecomment-553668211). Note how [this comment thread](https://github.com/pandas-dev/pandas/pull/32388#discussion_r386392799) on the previous PR asked to replace the author list by "The pandas development team". However, if users go to Zenodo to get the correct BibTeX entry of the version they're actually using, their citation will contain the full author list provided in Zenodo.
https://api.github.com/repos/pandas-dev/pandas/pulls/33311
2020-04-06T01:45:50Z
2020-04-10T17:43:37Z
2020-04-10T17:43:37Z
2020-05-07T10:10:11Z
CLN: remove fill_tuple kludge
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index d8b54fd5cffb3..a968fe1fa68f9 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1241,7 +1241,7 @@ def func(x): blocks = [self.make_block_same_class(interp_values)] return self._maybe_downcast(blocks, downcast) - def take_nd(self, indexer, axis: int, new_mgr_locs=None, fill_tuple=None): + def take_nd(self, indexer, axis: int, new_mgr_locs=None, fill_value=lib.no_default): """ Take values according to indexer and return them as a block.bb @@ -1252,11 +1252,10 @@ def take_nd(self, indexer, axis: int, new_mgr_locs=None, fill_tuple=None): values = self.values - if fill_tuple is None: + if fill_value is lib.no_default: fill_value = self.fill_value allow_fill = False else: - fill_value = fill_tuple[0] allow_fill = True new_values = algos.take_nd( @@ -1721,14 +1720,14 @@ def to_native_types(self, na_rep="nan", quoting=None, **kwargs): # we are expected to return a 2-d ndarray return values.reshape(1, len(values)) - def take_nd(self, indexer, axis: int = 0, new_mgr_locs=None, fill_tuple=None): + def take_nd( + self, indexer, axis: int = 0, new_mgr_locs=None, fill_value=lib.no_default + ): """ Take values according to indexer and return them as a block. 
""" - if fill_tuple is None: + if fill_value is lib.no_default: fill_value = None - else: - fill_value = fill_tuple[0] # axis doesn't matter; we are really a single-dim object # but are passed the axis depending on the calling routing diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index ac8de977b9a1a..00fbcd034163a 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1299,14 +1299,14 @@ def reindex_indexer( raise IndexError("Requested axis not found in manager") if axis == 0: - new_blocks = self._slice_take_blocks_ax0(indexer, fill_tuple=(fill_value,)) + new_blocks = self._slice_take_blocks_ax0(indexer, fill_value=fill_value) else: new_blocks = [ blk.take_nd( indexer, axis=axis, - fill_tuple=( - fill_value if fill_value is not None else blk.fill_value, + fill_value=( + fill_value if fill_value is not None else blk.fill_value ), ) for blk in self.blocks @@ -1317,7 +1317,7 @@ def reindex_indexer( return type(self).from_blocks(new_blocks, new_axes) - def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None): + def _slice_take_blocks_ax0(self, slice_or_indexer, fill_value=lib.no_default): """ Slice/take blocks along axis=0. 
@@ -1327,7 +1327,7 @@ def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None): ------- new_blocks : list of Block """ - allow_fill = fill_tuple is not None + allow_fill = fill_value is not lib.no_default sl_type, slobj, sllen = _preprocess_slice_or_indexer( slice_or_indexer, self.shape[0], allow_fill=allow_fill @@ -1339,16 +1339,15 @@ def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None): if sl_type in ("slice", "mask"): return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))] elif not allow_fill or self.ndim == 1: - if allow_fill and fill_tuple[0] is None: + if allow_fill and fill_value is None: _, fill_value = maybe_promote(blk.dtype) - fill_tuple = (fill_value,) return [ blk.take_nd( slobj, axis=0, new_mgr_locs=slice(0, sllen), - fill_tuple=fill_tuple, + fill_value=fill_value, ) ] @@ -1371,8 +1370,7 @@ def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None): blocks = [] for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=True): if blkno == -1: - # If we've got here, fill_tuple was not None. - fill_value = fill_tuple[0] + # If we've got here, fill_value was not lib.no_default blocks.append( self._make_na_block(placement=mgr_locs, fill_value=fill_value) @@ -1393,10 +1391,7 @@ def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None): else: blocks.append( blk.take_nd( - blklocs[mgr_locs.indexer], - axis=0, - new_mgr_locs=mgr_locs, - fill_tuple=None, + blklocs[mgr_locs.indexer], axis=0, new_mgr_locs=mgr_locs, ) )
https://api.github.com/repos/pandas-dev/pandas/pulls/33310
2020-04-06T01:35:09Z
2020-04-06T21:39:21Z
2020-04-06T21:39:21Z
2020-04-07T11:51:11Z
DOC: include Offset.__call__ to autosummary to fix sphinx warning
diff --git a/doc/source/reference/offset_frequency.rst b/doc/source/reference/offset_frequency.rst index fc1c6d6bd6d47..17544cb7a1225 100644 --- a/doc/source/reference/offset_frequency.rst +++ b/doc/source/reference/offset_frequency.rst @@ -37,6 +37,7 @@ Methods DateOffset.onOffset DateOffset.is_anchored DateOffset.is_on_offset + DateOffset.__call__ BusinessDay ----------- @@ -69,6 +70,7 @@ Methods BusinessDay.onOffset BusinessDay.is_anchored BusinessDay.is_on_offset + BusinessDay.__call__ BusinessHour ------------ @@ -100,6 +102,7 @@ Methods BusinessHour.onOffset BusinessHour.is_anchored BusinessHour.is_on_offset + BusinessHour.__call__ CustomBusinessDay ----------------- @@ -131,6 +134,7 @@ Methods CustomBusinessDay.onOffset CustomBusinessDay.is_anchored CustomBusinessDay.is_on_offset + CustomBusinessDay.__call__ CustomBusinessHour ------------------ @@ -162,6 +166,7 @@ Methods CustomBusinessHour.onOffset CustomBusinessHour.is_anchored CustomBusinessHour.is_on_offset + CustomBusinessHour.__call__ MonthOffset ----------- @@ -194,6 +199,7 @@ Methods MonthOffset.onOffset MonthOffset.is_anchored MonthOffset.is_on_offset + MonthOffset.__call__ MonthEnd -------- @@ -226,6 +232,7 @@ Methods MonthEnd.onOffset MonthEnd.is_anchored MonthEnd.is_on_offset + MonthEnd.__call__ MonthBegin ---------- @@ -258,6 +265,7 @@ Methods MonthBegin.onOffset MonthBegin.is_anchored MonthBegin.is_on_offset + MonthBegin.__call__ BusinessMonthEnd ---------------- @@ -290,6 +298,7 @@ Methods BusinessMonthEnd.onOffset BusinessMonthEnd.is_anchored BusinessMonthEnd.is_on_offset + BusinessMonthEnd.__call__ BusinessMonthBegin ------------------ @@ -322,6 +331,7 @@ Methods BusinessMonthBegin.onOffset BusinessMonthBegin.is_anchored BusinessMonthBegin.is_on_offset + BusinessMonthBegin.__call__ CustomBusinessMonthEnd ---------------------- @@ -354,6 +364,7 @@ Methods CustomBusinessMonthEnd.onOffset CustomBusinessMonthEnd.is_anchored CustomBusinessMonthEnd.is_on_offset + CustomBusinessMonthEnd.__call__ 
CustomBusinessMonthBegin ------------------------ @@ -386,6 +397,7 @@ Methods CustomBusinessMonthBegin.onOffset CustomBusinessMonthBegin.is_anchored CustomBusinessMonthBegin.is_on_offset + CustomBusinessMonthBegin.__call__ SemiMonthOffset --------------- @@ -418,6 +430,7 @@ Methods SemiMonthOffset.onOffset SemiMonthOffset.is_anchored SemiMonthOffset.is_on_offset + SemiMonthOffset.__call__ SemiMonthEnd ------------ @@ -450,6 +463,7 @@ Methods SemiMonthEnd.onOffset SemiMonthEnd.is_anchored SemiMonthEnd.is_on_offset + SemiMonthEnd.__call__ SemiMonthBegin -------------- @@ -482,6 +496,7 @@ Methods SemiMonthBegin.onOffset SemiMonthBegin.is_anchored SemiMonthBegin.is_on_offset + SemiMonthBegin.__call__ Week ---- @@ -514,6 +529,7 @@ Methods Week.onOffset Week.is_anchored Week.is_on_offset + Week.__call__ WeekOfMonth ----------- @@ -545,6 +561,7 @@ Methods WeekOfMonth.onOffset WeekOfMonth.is_anchored WeekOfMonth.is_on_offset + WeekOfMonth.__call__ LastWeekOfMonth --------------- @@ -576,6 +593,7 @@ Methods LastWeekOfMonth.onOffset LastWeekOfMonth.is_anchored LastWeekOfMonth.is_on_offset + LastWeekOfMonth.__call__ QuarterOffset ------------- @@ -608,6 +626,7 @@ Methods QuarterOffset.onOffset QuarterOffset.is_anchored QuarterOffset.is_on_offset + QuarterOffset.__call__ BQuarterEnd ----------- @@ -640,6 +659,7 @@ Methods BQuarterEnd.onOffset BQuarterEnd.is_anchored BQuarterEnd.is_on_offset + BQuarterEnd.__call__ BQuarterBegin ------------- @@ -672,6 +692,7 @@ Methods BQuarterBegin.onOffset BQuarterBegin.is_anchored BQuarterBegin.is_on_offset + BQuarterBegin.__call__ QuarterEnd ---------- @@ -704,6 +725,7 @@ Methods QuarterEnd.onOffset QuarterEnd.is_anchored QuarterEnd.is_on_offset + QuarterEnd.__call__ QuarterBegin ------------ @@ -736,6 +758,7 @@ Methods QuarterBegin.onOffset QuarterBegin.is_anchored QuarterBegin.is_on_offset + QuarterBegin.__call__ YearOffset ---------- @@ -768,6 +791,7 @@ Methods YearOffset.onOffset YearOffset.is_anchored YearOffset.is_on_offset + 
YearOffset.__call__ BYearEnd -------- @@ -800,6 +824,7 @@ Methods BYearEnd.onOffset BYearEnd.is_anchored BYearEnd.is_on_offset + BYearEnd.__call__ BYearBegin ---------- @@ -832,6 +857,7 @@ Methods BYearBegin.onOffset BYearBegin.is_anchored BYearBegin.is_on_offset + BYearBegin.__call__ YearEnd ------- @@ -864,6 +890,7 @@ Methods YearEnd.onOffset YearEnd.is_anchored YearEnd.is_on_offset + YearEnd.__call__ YearBegin --------- @@ -896,6 +923,7 @@ Methods YearBegin.onOffset YearBegin.is_anchored YearBegin.is_on_offset + YearBegin.__call__ FY5253 ------ @@ -929,6 +957,7 @@ Methods FY5253.onOffset FY5253.is_anchored FY5253.is_on_offset + FY5253.__call__ FY5253Quarter ------------- @@ -962,6 +991,7 @@ Methods FY5253Quarter.is_anchored FY5253Quarter.is_on_offset FY5253Quarter.year_has_extra_week + FY5253Quarter.__call__ Easter ------ @@ -993,6 +1023,7 @@ Methods Easter.onOffset Easter.is_anchored Easter.is_on_offset + Easter.__call__ Tick ---- @@ -1024,6 +1055,7 @@ Methods Tick.onOffset Tick.is_anchored Tick.is_on_offset + Tick.__call__ Day --- @@ -1055,6 +1087,7 @@ Methods Day.onOffset Day.is_anchored Day.is_on_offset + Day.__call__ Hour ---- @@ -1086,6 +1119,7 @@ Methods Hour.onOffset Hour.is_anchored Hour.is_on_offset + Hour.__call__ Minute ------ @@ -1117,6 +1151,7 @@ Methods Minute.onOffset Minute.is_anchored Minute.is_on_offset + Minute.__call__ Second ------ @@ -1148,6 +1183,7 @@ Methods Second.onOffset Second.is_anchored Second.is_on_offset + Second.__call__ Milli ----- @@ -1179,6 +1215,7 @@ Methods Milli.onOffset Milli.is_anchored Milli.is_on_offset + Milli.__call__ Micro ----- @@ -1210,6 +1247,7 @@ Methods Micro.onOffset Micro.is_anchored Micro.is_on_offset + Micro.__call__ Nano ---- @@ -1241,6 +1279,7 @@ Methods Nano.onOffset Nano.is_anchored Nano.is_on_offset + Nano.__call__ BDay ---- @@ -1277,6 +1316,7 @@ Methods BDay.is_on_offset BDay.rollback BDay.rollforward + BDay.__call__ BMonthEnd --------- @@ -1312,6 +1352,7 @@ Methods BMonthEnd.is_on_offset 
BMonthEnd.rollback BMonthEnd.rollforward + BMonthEnd.__call__ BMonthBegin ----------- @@ -1347,6 +1388,7 @@ Methods BMonthBegin.is_on_offset BMonthBegin.rollback BMonthBegin.rollforward + BMonthBegin.__call__ CBMonthEnd ---------- @@ -1386,6 +1428,7 @@ Methods CBMonthEnd.is_on_offset CBMonthEnd.rollback CBMonthEnd.rollforward + CBMonthEnd.__call__ CBMonthBegin ------------ @@ -1425,6 +1468,7 @@ Methods CBMonthBegin.is_on_offset CBMonthBegin.rollback CBMonthBegin.rollforward + CBMonthBegin.__call__ CDay ---- @@ -1461,6 +1505,7 @@ Methods CDay.is_on_offset CDay.rollback CDay.rollforward + CDay.__call__ .. _api.frequencies:
https://api.github.com/repos/pandas-dev/pandas/pulls/33309
2020-04-06T01:23:39Z
2020-04-06T13:59:55Z
2020-04-06T13:59:55Z
2020-05-26T09:39:48Z
REF: call _block_shape from EABlock.make_block
diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index bc45b7c74ecc1..d6838355e6791 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -10,7 +10,6 @@ IntBlock, ObjectBlock, TimeDeltaBlock, - _block_shape, _safe_reshape, make_block, ) @@ -34,7 +33,6 @@ "TimeDeltaBlock", "_safe_reshape", "make_block", - "_block_shape", "BlockManager", "SingleBlockManager", "concatenate_block_managers", diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index d8b54fd5cffb3..316811f46cbfb 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -244,6 +244,8 @@ def make_block(self, values, placement=None) -> "Block": """ if placement is None: placement = self.mgr_locs + if self.is_extension: + values = _block_shape(values, ndim=self.ndim) return make_block(values, placement=placement, ndim=self.ndim) @@ -355,13 +357,12 @@ def _split_op_result(self, result) -> List["Block"]: nbs = [] for i, loc in enumerate(self.mgr_locs): vals = result[i] - nv = _block_shape(vals, ndim=self.ndim) - block = self.make_block(values=nv, placement=[loc]) + block = self.make_block(values=vals, placement=[loc]) nbs.append(block) return nbs if not isinstance(result, Block): - result = self.make_block(values=_block_shape(result, ndim=self.ndim)) + result = self.make_block(result) return [result] @@ -1277,9 +1278,6 @@ def take_nd(self, indexer, axis: int, new_mgr_locs=None, fill_tuple=None): def diff(self, n: int, axis: int = 1) -> List["Block"]: """ return block for the diff of the values """ new_values = algos.diff(self.values, n, axis=axis, stacklevel=7) - # We use block_shape for ExtensionBlock subclasses, which may call here - # via a super. 
- new_values = _block_shape(new_values, ndim=self.ndim) return [self.make_block(values=new_values)] def shift(self, periods: int, axis: int = 0, fill_value=None): @@ -2267,7 +2265,7 @@ def concat_same_type(self, to_concat): values = values.astype(object, copy=False) placement = self.mgr_locs if self.ndim == 2 else slice(len(values)) - return self.make_block(_block_shape(values, self.ndim), placement=placement) + return self.make_block(values, placement=placement) return super().concat_same_type(to_concat) def fillna(self, value, limit=None, inplace=False, downcast=None): @@ -2456,7 +2454,6 @@ def f(mask, val, idx): # TODO: allow EA once reshape is supported values = values.reshape(shape) - values = _block_shape(values, ndim=self.ndim) return values if self.ndim == 2: @@ -2738,9 +2735,7 @@ def concat_same_type(self, to_concat): ) placement = self.mgr_locs if self.ndim == 2 else slice(len(values)) # not using self.make_block_same_class as values can be object dtype - return self.make_block( - _block_shape(values, ndim=self.ndim), placement=placement - ) + return self.make_block(values, placement=placement) def replace( self, @@ -2859,16 +2854,15 @@ def _extend_blocks(result, blocks=None): return blocks -def _block_shape(values, ndim=1, shape=None): +def _block_shape(values: ArrayLike, ndim: int = 1) -> ArrayLike: """ guarantee the shape of the values to be at least 1 d """ if values.ndim < ndim: - if shape is None: - shape = values.shape - if not is_extension_array_dtype(values): - # TODO: https://github.com/pandas-dev/pandas/issues/23023 + shape = values.shape + if not is_extension_array_dtype(values.dtype): + # TODO(EA2D): https://github.com/pandas-dev/pandas/issues/23023 # block.shape is incorrect for "2D" ExtensionArrays # We can't, and don't need to, reshape. - values = values.reshape(tuple((1,) + shape)) + values = values.reshape(tuple((1,) + shape)) # type: ignore return values
https://api.github.com/repos/pandas-dev/pandas/pulls/33308
2020-04-05T22:46:42Z
2020-04-10T16:02:48Z
2020-04-10T16:02:48Z
2020-04-10T18:13:17Z
TST: misplaced reduction/indexing tests
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 6525e93d89fce..e1fc7e9d7c5b8 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1274,3 +1274,28 @@ def test_series_broadcasting(self): df_nan.clip(lower=s, axis=0) for op in ["lt", "le", "gt", "ge", "eq", "ne"]: getattr(df, op)(s_nan, axis=0) + + +class TestDataFrameReductions: + def test_min_max_dt64_with_NaT(self): + # Both NaT and Timestamp are in DataFrame. + df = pd.DataFrame({"foo": [pd.NaT, pd.NaT, pd.Timestamp("2012-05-01")]}) + + res = df.min() + exp = pd.Series([pd.Timestamp("2012-05-01")], index=["foo"]) + tm.assert_series_equal(res, exp) + + res = df.max() + exp = pd.Series([pd.Timestamp("2012-05-01")], index=["foo"]) + tm.assert_series_equal(res, exp) + + # GH12941, only NaTs are in DataFrame. + df = pd.DataFrame({"foo": [pd.NaT, pd.NaT]}) + + res = df.min() + exp = pd.Series([pd.NaT], index=["foo"]) + tm.assert_series_equal(res, exp) + + res = df.max() + exp = pd.Series([pd.NaT], index=["foo"]) + tm.assert_series_equal(res, exp) diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index 452af895e4967..dea921a92ae37 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -54,29 +54,6 @@ def test_frame_append_datetime64_col_other_units(self): assert (tmp["dates"].values == ex_vals).all() - def test_operation_on_NaT(self): - # Both NaT and Timestamp are in DataFrame. - df = pd.DataFrame({"foo": [pd.NaT, pd.NaT, pd.Timestamp("2012-05-01")]}) - - res = df.min() - exp = pd.Series([pd.Timestamp("2012-05-01")], index=["foo"]) - tm.assert_series_equal(res, exp) - - res = df.max() - exp = pd.Series([pd.Timestamp("2012-05-01")], index=["foo"]) - tm.assert_series_equal(res, exp) - - # GH12941, only NaTs are in DataFrame. 
- df = pd.DataFrame({"foo": [pd.NaT, pd.NaT]}) - - res = df.min() - exp = pd.Series([pd.NaT], index=["foo"]) - tm.assert_series_equal(res, exp) - - res = df.max() - exp = pd.Series([pd.NaT], index=["foo"]) - tm.assert_series_equal(res, exp) - def test_datetime_assignment_with_NaT_and_diff_time_units(self): # GH 7492 data_ns = np.array([1, "nat"], dtype="datetime64[ns]") diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index 5882f5c77428b..58e2afc869e02 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -476,6 +476,13 @@ def test_get_loc_reasonable_key_error(self): index.get_loc("1/1/2000") +class TestContains: + def test_index_dupes_contains(self): + d = datetime(2011, 12, 5, 20, 30) + ix = DatetimeIndex([d, d]) + assert d in ix + + class TestDatetimeIndex: @pytest.mark.parametrize( "null", [None, np.nan, np.datetime64("NaT"), pd.NaT, pd.NA] diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py index b5d04fd499c08..18c11f2b9eb61 100644 --- a/pandas/tests/series/indexing/test_datetime.py +++ b/pandas/tests/series/indexing/test_datetime.py @@ -464,12 +464,6 @@ def test_index_unique(dups): assert idx.nunique(dropna=False) == 21 -def test_index_dupes_contains(): - d = datetime(2011, 12, 5, 20, 30) - ix = DatetimeIndex([d, d]) - assert d in ix - - def test_duplicate_dates_indexing(dups): ts = dups @@ -705,15 +699,6 @@ def test_set_none_nan(): assert series[6] is NaT -def test_nat_operations(): - # GH 8617 - s = Series([0, pd.NaT], dtype="m8[ns]") - exp = s[0] - assert s.median() == exp - assert s.min() == exp - assert s.max() == exp - - def test_setitem_tuple_with_datetimetz(): # GH 20441 arr = date_range("2017", periods=4, tz="US/Eastern") diff --git a/pandas/tests/series/test_reductions.py b/pandas/tests/series/test_reductions.py new file mode 100644 index 0000000000000..be9330a14f9c9 --- 
/dev/null +++ b/pandas/tests/series/test_reductions.py @@ -0,0 +1,11 @@ +import pandas as pd +from pandas import Series + + +def test_reductions_td64_with_nat(): + # GH#8617 + ser = Series([0, pd.NaT], dtype="m8[ns]") + exp = ser[0] + assert ser.median() == exp + assert ser.min() == exp + assert ser.max() == exp
https://api.github.com/repos/pandas-dev/pandas/pulls/33307
2020-04-05T20:14:50Z
2020-04-06T21:53:20Z
2020-04-06T21:53:20Z
2020-04-06T22:37:23Z
ENH: Named aggregations with multiple columns
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index cbfc6d63e8ea3..8d399dc9051a0 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -88,7 +88,7 @@ Other enhancements - :class:`Series.str` now has a `fullmatch` method that matches a regular expression against the entire string in each row of the series, similar to `re.fullmatch` (:issue:`32806`). - :meth:`DataFrame.sample` will now also allow array-like and BitGenerator objects to be passed to ``random_state`` as seeds (:issue:`32503`) - :meth:`MultiIndex.union` will now raise `RuntimeWarning` if the object inside are unsortable, pass `sort=False` to suppress this warning (:issue:`33015`) -- +- :meth:`DataFrameGroupby.aggregate` will now support named aggregations with multiple columns (:issue:`29268`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/base.py b/pandas/core/base.py index 5945d8a4b432d..9cd2330c4a12b 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -299,6 +299,7 @@ def _aggregate(self, arg, *args, **kwargs): None if not required """ is_aggregator = lambda x: isinstance(x, (list, tuple, dict)) + deserialized_keys = {} _axis = kwargs.pop("_axis", None) if _axis is None: @@ -339,8 +340,22 @@ def _aggregate(self, arg, *args, **kwargs): raise SpecificationError("nested renamer is not supported") elif isinstance(obj, ABCSeries): raise SpecificationError("nested renamer is not supported") - elif isinstance(obj, ABCDataFrame) and k not in obj.columns: - raise KeyError(f"Column '{k}' does not exist!") + elif isinstance(obj, ABCDataFrame): + # GH 29268 + if k not in obj.columns: + # Check if list thingy + try: + keys = np.frombuffer(k, dtype=np.dtype("<U1")) + except (AttributeError, TypeError): + raise KeyError(f"Column '{k}' does not exist!") + + # Check keys + for key in keys: + if key not in obj.columns: + raise KeyError(f"Column '{key}' does not exist!") + + # Memorize 
operation + deserialized_keys[k] = keys arg = new_arg @@ -374,6 +389,14 @@ def _agg_2dim(how): colg = self._gotitem(self._selection, ndim=2, subset=obj) return colg.aggregate(how) + # GH 29268 + def _agg_multi_dim(name, how, keys): + from pandas.core.frame import DataFrame + + _obj = {k: self._gotitem(k, ndim=1, subset=None) for k in keys} + result = {com.get_callable_name(agg): agg(_obj) for agg in how} + return DataFrame(result, columns=result.keys()) + def _agg(arg, func): """ run the aggregations over the arg with func @@ -381,7 +404,13 @@ def _agg(arg, func): """ result = {} for fname, agg_how in arg.items(): - result[fname] = func(fname, agg_how) + # GH 29268 + if fname in deserialized_keys: + keys = deserialized_keys[fname] + result[fname] = _agg_multi_dim(fname, agg_how, keys) + else: + result[fname] = func(fname, agg_how) + return result # set the final keys @@ -412,11 +441,9 @@ def _agg(arg, func): # no selection else: - try: result = _agg(arg, _agg_1dim) except SpecificationError: - # we are aggregating expecting all 1d-returns # but we have 2d result = _agg(arg, _agg_2dim) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 208cbfc5b06d6..cc79f54202c5d 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -879,23 +879,30 @@ class DataFrameGroupBy(GroupBy[DataFrame]): 1 1 2 0.590716 2 3 4 0.704907 - To control the output names with different aggregations per column, + To control the output names with different aggregations, pandas supports "named aggregation" >>> df.groupby("A").agg( ... b_min=pd.NamedAgg(column="B", aggfunc="min"), - ... c_sum=pd.NamedAgg(column="C", aggfunc="sum")) - b_min c_sum + ... c_sum=pd.NamedAgg(column="C", aggfunc="sum"), + ... cb_sum_diff=pd.NamedAgg( + ... column=["B", "C"], + ... aggfunc=lambda x: x["C"].sum() - x["B"].sum() + ... ) + ... 
) + b_min c_sum cb_sum_diff A - 1 1 -1.956929 - 2 3 -0.322183 + 1 1 1.449287 -1.550713 + 2 3 0.110498 -6.889502 - The keywords are the *output* column names - - The values are tuples whose first element is the column to select + - The values are tuples whose first element is the column(s) to select and the second element is the aggregation to apply to that column. Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields ``['column', 'aggfunc']`` to make it clearer what the arguments are. As usual, the aggregation can be a callable or a string alias. + - When performing named aggregations with multiple columns, the second + element has to be a lambda and returns a 1 dimension DataFrame. See :ref:`groupby.aggregate.named` for more. """ @@ -910,11 +917,17 @@ class DataFrameGroupBy(GroupBy[DataFrame]): ) @Appender(_shared_docs["aggregate"]) def aggregate(self, func=None, *args, **kwargs): - relabeling = func is None and is_multi_agg_with_relabel(**kwargs) if relabeling: - func, columns, order = normalize_keyword_aggregation(kwargs) + # GH 29268 + from types import LambdaType + for k, v in list(kwargs.items()): + if isinstance(v[0], list) & isinstance(v[1], LambdaType): + serialized_key = np.sort(np.array(v[0])) + kwargs[k] = (serialized_key.tobytes(),) + v[1:] + + func, columns, order = normalize_keyword_aggregation(kwargs) kwargs = {} elif isinstance(func, list) and len(func) > len(set(func)): diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index e860ea1a3d052..b2c3b324f21bb 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -636,6 +636,34 @@ def test_mangled(self): ) tm.assert_frame_equal(result, expected) + def test_agg_multiple_columns(self): + df = pd.DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]}) + result = df.groupby("A").agg( + add=(["B", "C"], lambda x: x["B"].max() + x["C"].min()), + minus=(["C", 
"B"], lambda x: x["B"].max() - x["C"].min()), + ) + expected = pd.DataFrame( + {"add": [5, 9], "minus": [-1, -1]}, index=pd.Index([0, 1], name="A") + ) + tm.assert_frame_equal(result, expected) + + def test_agg_multi_missing_column_raises(self): + df = pd.DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]}) + with pytest.raises(KeyError, match="Column 'D' does not exist"): + df.groupby("A").agg( + minus=(["D", "C"], lambda x: x["D"].max() - x["C"].min()), + ) + + def test_agg_multi_missing_key_raises(self): + df = pd.DataFrame( + {"A": [0, 0, 1, 1], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6], "D": [0, 0, 1, 1]} + ) + # shouldn't be able to get aggregrations on columns not specified + with pytest.raises(KeyError, match="D"): + df.groupby("A").agg( + minus=(["B", "C"], lambda x: x["D"].max() - x["D"].min()), + ) + @pytest.mark.parametrize( "agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3",
- [ ] closes #29268 - [x] tests added - [ ] tests passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33306
2020-04-05T19:28:13Z
2020-06-26T15:42:34Z
null
2020-06-26T18:40:06Z
REGR: Fix construction of PeriodIndex from strings
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 584e21e87390d..cd55a4098b542 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -388,6 +388,7 @@ Datetimelike - :class:`Timestamp` raising confusing error message when year, month or day is missing (:issue:`31200`) - Bug in :class:`DatetimeIndex` constructor incorrectly accepting ``bool``-dtyped inputs (:issue:`32668`) - Bug in :meth:`DatetimeIndex.searchsorted` not accepting a ``list`` or :class:`Series` as its argument (:issue:`32762`) +- Bug where :meth:`PeriodIndex` raised when passed a :class:`Series` of strings (:issue:`26109`) - Bug in :class:`Timestamp` arithmetic when adding or subtracting a ``np.ndarray`` with ``timedelta64`` dtype (:issue:`33296`) Timedelta diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 39a3b553b3cf4..99d9d69d66ec2 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -831,11 +831,11 @@ def period_array( """ if is_datetime64_dtype(data): return PeriodArray._from_datetime64(data, freq) - if isinstance(data, (ABCPeriodIndex, ABCSeries, PeriodArray)): + if is_period_dtype(data): return PeriodArray(data, freq) # other iterable of some kind - if not isinstance(data, (np.ndarray, list, tuple)): + if not isinstance(data, (np.ndarray, list, tuple, ABCSeries)): data = list(data) data = np.asarray(data) diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index fe35344f46688..7eb0e46ab8f1e 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -10,7 +10,7 @@ import pandas._testing as tm from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray from pandas.core.indexes.datetimes import DatetimeIndex -from pandas.core.indexes.period import PeriodIndex +from pandas.core.indexes.period import Period, PeriodIndex from pandas.core.indexes.timedeltas import TimedeltaIndex @@ 
-897,3 +897,13 @@ def test_searchsorted_datetimelike_with_listlike_invalid_dtype(values, arg): msg = "[Unexpected type|Cannot compare]" with pytest.raises(TypeError, match=msg): values.searchsorted(arg) + + +@pytest.mark.parametrize("klass", [list, tuple, np.array, pd.Series]) +def test_period_index_construction_from_strings(klass): + # https://github.com/pandas-dev/pandas/issues/26109 + strings = ["2020Q1", "2020Q2"] * 2 + data = klass(strings) + result = PeriodIndex(data, freq="Q") + expected = PeriodIndex([Period(s) for s in strings]) + tm.assert_index_equal(result, expected) diff --git a/pandas/tests/arrays/test_period.py b/pandas/tests/arrays/test_period.py index 0b95d3aa19366..d3ced2f1b1f07 100644 --- a/pandas/tests/arrays/test_period.py +++ b/pandas/tests/arrays/test_period.py @@ -37,6 +37,7 @@ def test_registered(): ([pd.Period("2017", "D"), None], None, [17167, iNaT]), (pd.Series(pd.date_range("2017", periods=3)), None, [17167, 17168, 17169]), (pd.date_range("2017", periods=3), None, [17167, 17168, 17169]), + (pd.period_range("2017", periods=4, freq="Q"), None, [188, 189, 190, 191]), ], ) def test_period_array_ok(data, freq, expected):
- [ ] closes #26109 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33304
2020-04-05T16:45:53Z
2020-04-10T17:29:56Z
2020-04-10T17:29:56Z
2020-04-10T17:36:30Z
BUG: wrong df.groupby().groups when grouping with [Grouper(freq=), ...] (GH33132)
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 742de397956c0..75a0953556a11 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -39,6 +39,7 @@ from pandas.core.dtypes.missing import _maybe_fill, isna import pandas.core.algorithms as algorithms +from pandas.core.arrays import Categorical from pandas.core.base import SelectionMixin import pandas.core.common as com from pandas.core.frame import DataFrame @@ -758,7 +759,11 @@ def _get_grouper(self): We have a specific method of grouping, so cannot convert to a Index for our grouper. """ - return self + # Return an index based on codes_info + grouper = self.result_index._constructor( + Categorical.from_codes(self.codes_info, self.result_index) + ) + return grouper def get_iterator(self, data: FrameOrSeries, axis: int = 0): """ diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index b8d8f56512a69..e5c9c7b5d590c 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1864,6 +1864,20 @@ def test_groupby_groups_in_BaseGrouper(): assert result.groups == expected.groups +def test_groupby_groups_in_BaseGrouper_with_BinGrouper(): + # GH 33132 + # Test if DataFrame grouped with a pandas.Grouper and freq param has correct groups + mi = pd.MultiIndex.from_product( + [date_range(datetime.today(), periods=2), ["C", "D"]], names=["alpha", "beta"] + ) + df = pd.DataFrame({"foo": [1, 2, 1, 2], "bar": [1, 2, 3, 4]}, index=mi) + result = df.groupby(["beta", pd.Grouper(level="alpha", freq="D")]) + assert result.ngroups == len(result) + + result = df.groupby([pd.Grouper(level="alpha", freq="D"), "beta"]) + assert result.ngroups == len(result) + + @pytest.mark.parametrize("group_name", ["x", ["x"]]) def test_groupby_axis_1(group_name): # GH 27614
- [ ] closes #33132 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33303
2020-04-05T16:23:21Z
2020-09-13T00:14:06Z
null
2020-09-13T00:14:06Z
DOC/CLN: remove versionadded/changed:: 0.21
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index 31241287c61cb..ba7f7eb907f4a 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -791,7 +791,7 @@ the ``pandas.util._decorators.deprecate``: from pandas.util._decorators import deprecate - deprecate('old_func', 'new_func', '0.21.0') + deprecate('old_func', 'new_func', '1.1.0') Otherwise, you need to do it manually: @@ -803,7 +803,7 @@ Otherwise, you need to do it manually: def old_func(): """Summary of the function. - .. deprecated:: 0.21.0 + .. deprecated:: 1.1.0 Use new_func instead. """ warnings.warn('Use new_func instead.', FutureWarning, stacklevel=2) @@ -1354,9 +1354,9 @@ directive is used. The sphinx syntax for that is: .. code-block:: rst - .. versionadded:: 0.21.0 + .. versionadded:: 1.1.0 -This will put the text *New in version 0.21.0* wherever you put the sphinx +This will put the text *New in version 1.1.0* wherever you put the sphinx directive. This should also be put in the docstring when adding a new function or method (`example <https://github.com/pandas-dev/pandas/blob/v0.20.2/pandas/core/frame.py#L1495>`__) or a new keyword argument (`example <https://github.com/pandas-dev/pandas/blob/v0.20.2/pandas/core/generic.py#L568>`__). diff --git a/doc/source/user_guide/basics.rst b/doc/source/user_guide/basics.rst index aa93f37a313f9..055b43bc1e59b 100644 --- a/doc/source/user_guide/basics.rst +++ b/doc/source/user_guide/basics.rst @@ -1224,8 +1224,6 @@ following can be done: This means that the reindexed Series's index is the same Python object as the DataFrame's index. -.. versionadded:: 0.21.0 - :meth:`DataFrame.reindex` also supports an "axis-style" calling convention, where you specify a single ``labels`` argument and the ``axis`` it applies to. @@ -1435,8 +1433,6 @@ Series can also be used: If the mapping doesn't include a column/index label, it isn't renamed. 
Note that extra labels in the mapping don't throw an error. -.. versionadded:: 0.21.0 - :meth:`DataFrame.rename` also supports an "axis-style" calling convention, where you specify a single ``mapper`` and the ``axis`` to apply that mapping to. diff --git a/doc/source/user_guide/categorical.rst b/doc/source/user_guide/categorical.rst index a55326db748fd..d4faf527a4790 100644 --- a/doc/source/user_guide/categorical.rst +++ b/doc/source/user_guide/categorical.rst @@ -211,8 +211,6 @@ To get back to the original ``Series`` or NumPy array, use CategoricalDtype ---------------- -.. versionchanged:: 0.21.0 - A categorical's type is fully described by 1. ``categories``: a sequence of unique values and no missing values diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst index 8cd229070e365..b06c3afa6dfe8 100644 --- a/doc/source/user_guide/groupby.rst +++ b/doc/source/user_guide/groupby.rst @@ -1327,8 +1327,6 @@ See the :ref:`visualization documentation<visualization.box>` for more. Piping function calls ~~~~~~~~~~~~~~~~~~~~~ -.. versionadded:: 0.21.0 - Similar to the functionality provided by ``DataFrame`` and ``Series``, functions that take ``GroupBy`` objects can be chained together using a ``pipe`` method to allow for a cleaner, more readable syntax. To read about ``.pipe`` in general terms, diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index d68dc24bae658..a4cc1f9ee02ca 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -461,8 +461,6 @@ specification: pd.read_csv(StringIO(data), dtype={'col1': 'category'}).dtypes -.. versionadded:: 0.21.0 - Specifying ``dtype='category'`` will result in an unordered ``Categorical`` whose ``categories`` are the unique values observed in the data. 
For more control on the categories and order, create a @@ -2171,8 +2169,6 @@ Line delimited json pandas is able to read and write line-delimited json files that are common in data processing pipelines using Hadoop or Spark. -.. versionadded:: 0.21.0 - For line-delimited json files, pandas can also return an iterator which reads in ``chunksize`` lines at a time. This can be useful for large files or to read from a stream. .. ipython:: python @@ -4646,8 +4642,6 @@ Read from a feather file. Parquet ------- -.. versionadded:: 0.21.0 - `Apache Parquet <https://parquet.apache.org/>`__ provides a partitioned binary columnar serialization for data frames. It is designed to make reading and writing data frames efficient, and to make sharing data across data analysis languages easy. Parquet can use a variety of compression techniques to shrink the file size as much as possible diff --git a/doc/source/user_guide/merging.rst b/doc/source/user_guide/merging.rst index 49f4bbb6beb19..0450c81958a51 100644 --- a/doc/source/user_guide/merging.rst +++ b/doc/source/user_guide/merging.rst @@ -573,8 +573,6 @@ all standard database join operations between ``DataFrame`` or named ``Series`` dataset. * "many_to_many" or "m:m": allowed, but does not result in checks. - .. versionadded:: 0.21.0 - .. note:: Support for specifying index levels as the ``on``, ``left_on``, and @@ -773,8 +771,6 @@ Here is another example with duplicate join keys in DataFrames: Checking for duplicate keys ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. versionadded:: 0.21.0 - Users can use the ``validate`` argument to automatically check whether there are unexpected duplicates in their merge keys. Key uniqueness is checked before merge operations and so should protect against memory overflows. 
Checking key diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 9802b29b1dbc7..276c2d5198831 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -1197,8 +1197,6 @@ def infer_dtype(value: object, skipna: bool = True) -> str: skipna : bool, default True Ignore NaN values when inferring the type. - .. versionadded:: 0.21.0 - Returns ------- str diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 55c42f59f865e..ad82d68baa5b3 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -242,8 +242,6 @@ class Categorical(ExtensionArray, PandasObject): dtype : CategoricalDtype An instance of ``CategoricalDtype`` to use for this categorical. - .. versionadded:: 0.21.0 - Attributes ---------- categories : Index @@ -257,8 +255,6 @@ class Categorical(ExtensionArray, PandasObject): The instance of ``CategoricalDtype`` storing the ``categories`` and ``ordered``. - .. versionadded:: 0.21.0 - Methods ------- from_codes @@ -876,8 +872,6 @@ def rename_categories(self, new_categories, inplace=False): are passed through and extra categories in the mapping are ignored. - .. versionadded:: 0.21.0. - * callable : a callable that is called on all items in the old categories and whose return values comprise the new categories. @@ -1306,7 +1300,6 @@ def __setstate__(self, state): if not isinstance(state, dict): raise Exception("invalid pickle state") - # compat with pre 0.21.0 CategoricalDtype change if "_dtype" not in state: state["_dtype"] = CategoricalDtype(state["_categories"], state["_ordered"]) diff --git a/pandas/core/common.py b/pandas/core/common.py index 4ff1a93737d41..8b152162dc95a 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -357,8 +357,6 @@ def standardize_mapping(into): """ Helper function to standardize a supplied mapping. - .. 
versionadded:: 0.21.0 - Parameters ---------- into : instance or subclass of collections.abc.Mapping diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 17c4c6ba1c701..4be5da9c4c54a 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -189,8 +189,6 @@ class CategoricalDtype(PandasExtensionDtype, ExtensionDtype): """ Type for categorical data with the categories and orderedness. - .. versionchanged:: 0.21.0 - Parameters ---------- categories : sequence, optional diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 71b755bbf9665..ddb7be405d77a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -245,8 +245,6 @@ dataset. * "many_to_many" or "m:m": allowed, but does not result in checks. - .. versionadded:: 0.21.0 - Returns ------- DataFrame @@ -1339,8 +1337,6 @@ def to_dict(self, orient="dict", into=dict): instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. - .. versionadded:: 0.21.0 - Returns ------- dict, list or collections.abc.Mapping @@ -2118,8 +2114,6 @@ def to_parquet( """ Write a DataFrame to the binary parquet format. - .. versionadded:: 0.21.0 - This function writes the dataframe as a `parquet file <https://parquet.apache.org/>`_. You can choose different parquet backends, and have the option of compression. See @@ -3749,13 +3743,9 @@ def drop( index : single label or list-like Alternative to specifying axis (``labels, axis=0`` is equivalent to ``index=labels``). - - .. versionadded:: 0.21.0 columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). - - .. versionadded:: 0.21.0 level : int or level name, optional For MultiIndex, level from which the labels will be removed. 
inplace : bool, default False diff --git a/pandas/core/generic.py b/pandas/core/generic.py index c202bf846047f..9640c1e087f47 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -526,13 +526,6 @@ def set_axis(self, labels, axis: Axis = 0, inplace: bool = False): Indexes for%(extended_summary_sub)s row labels can be changed by assigning a list-like or Index. - .. versionchanged:: 0.21.0 - - The signature is now `labels` and `axis`, consistent with - the rest of pandas API. Previously, the `axis` and `labels` - arguments were respectively the first and second positional - arguments. - Parameters ---------- labels : list-like, Index @@ -1178,8 +1171,6 @@ def _set_axis_name(self, name, axis=0, inplace=False): inplace : bool, default False If `True`, do operation inplace and return None. - .. versionadded:: 0.21.0 - Returns ------- Series, DataFrame, or None @@ -2146,7 +2137,6 @@ def to_json( only used when the first argument is a filename. By default, the compression is inferred from the filename. - .. versionadded:: 0.21.0 .. versionchanged:: 0.24.0 'infer' option added and set to default index : bool, default True @@ -2663,7 +2653,6 @@ def to_pickle( parameter is equivalent to setting its value to HIGHEST_PROTOCOL. .. [1] https://docs.python.org/3/library/pickle.html. - .. versionadded:: 0.21.0. See Also -------- @@ -3794,8 +3783,6 @@ def reindex_like( the same size as the index and its dtype must exactly match the index's type. - .. versionadded:: 0.21.0 (list-like tolerance) - Returns ------- Series or DataFrame @@ -4235,8 +4222,6 @@ def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries: the same size as the index and its dtype must exactly match the index's type. - .. versionadded:: 0.21.0 (list-like tolerance) - Returns ------- %(klass)s with changed index. @@ -5750,8 +5735,6 @@ def infer_objects(self: FrameOrSeries) -> FrameOrSeries: columns unchanged. The inference rules are the same as during normal Series/DataFrame construction. 
- .. versionadded:: 0.21.0 - Returns ------- converted : same type as input object @@ -7287,8 +7270,6 @@ def clip( Align object with lower and upper along the given axis. inplace : bool, default False Whether to perform the operation in place on the data. - - .. versionadded:: 0.21.0 *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 5fec68d257167..b97f0366579b3 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2836,8 +2836,6 @@ def get_loc(self, key, method=None, tolerance=None): the index at the matching location most satisfy the equation ``abs(index[loc] - key) <= tolerance``. - .. versionadded:: 0.21.0 (list-like tolerance) - Returns ------- loc : int if unique index, slice if monotonic index, else mask @@ -2909,8 +2907,6 @@ def get_loc(self, key, method=None, tolerance=None): the same size as the index and its dtype must exactly match the index's type. - .. versionadded:: 0.21.0 (list-like tolerance) - Returns ------- indexer : ndarray of int diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 073e1967678ec..635bf32639075 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -91,8 +91,6 @@ class CategoricalIndex(ExtensionIndex, accessor.PandasDelegate): dtype : CategoricalDtype or "category", optional If :class:`CategoricalDtype`, cannot be used together with `categories` or `ordered`. - - .. versionadded:: 0.21.0 copy : bool, default False Make a copy of input ndarray. 
name : object, optional diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 92c3b9125d269..68d6229e798f5 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1014,16 +1014,10 @@ def bdate_range( Weekmask of valid business days, passed to ``numpy.busdaycalendar``, only used when custom frequency strings are passed. The default value None is equivalent to 'Mon Tue Wed Thu Fri'. - - .. versionadded:: 0.21.0 - holidays : list-like or None, default None Dates to exclude from the set of valid business days, passed to ``numpy.busdaycalendar``, only used when custom frequency strings are passed. - - .. versionadded:: 0.21.0 - closed : str, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None). diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 2e1dcf8da5bd4..b17092caabdd1 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -467,8 +467,6 @@ def nearest(self, limit=None): limit : int, optional Limit of how many values to fill. - .. versionadded:: 0.21.0 - Returns ------- Series or DataFrame diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index b3b0166334413..17473ac26dfd6 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -500,9 +500,6 @@ def crosstab( margins_name : str, default 'All' Name of the row/column that will contain the totals when margins is True. - - .. versionadded:: 0.21.0 - dropna : bool, default True Do not include columns whose entries are all NaN. normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False diff --git a/pandas/core/series.py b/pandas/core/series.py index 03b82365358ac..2b073b3c5cebf 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1509,8 +1509,6 @@ def to_dict(self, into=dict): instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. - .. 
versionadded:: 0.21.0 - Returns ------- collections.abc.Mapping @@ -4067,12 +4065,8 @@ def drop( index : single label or list-like Redundant for application on Series, but 'index' can be used instead of 'labels'. - - .. versionadded:: 0.21.0 columns : single label or list-like No change is made to the Series; use 'index' or 'labels' instead. - - .. versionadded:: 0.21.0 level : int or level name, optional For MultiIndex, level for which the labels will be removed. inplace : bool, default False diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 718534e42ec25..fecdf3b758f0f 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -765,8 +765,6 @@ def where( Updates the HTML representation with a style which is selected in accordance with the return value of a function. - .. versionadded:: 0.21.0 - Parameters ---------- cond : callable diff --git a/pandas/io/html.py b/pandas/io/html.py index ce6674ffb9588..442a2791fc6e6 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -1057,8 +1057,6 @@ def read_html( the header, otherwise the function attempts to find the header within the body (by putting rows with only ``<th>`` elements into the header). - .. versionadded:: 0.21.0 - Similar to :func:`~read_csv` the `header` argument is applied **after** `skiprows` is applied. diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index d6b90ae99973e..b955b83dbfde5 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -490,9 +490,6 @@ def read_json( for more information on ``chunksize``. This can only be passed if `lines=True`. If this is None, the file will be read into memory all at once. - - .. versionadded:: 0.21.0 - compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' For on-the-fly decompression of on-disk data. If 'infer', then use gzip, bz2, zip or xz if path_or_buf is a string ending in @@ -500,8 +497,6 @@ def read_json( otherwise. 
If using 'zip', the ZIP file must contain only one data file to be read in. Set to None for no decompression. - .. versionadded:: 0.21.0 - Returns ------- Series or DataFrame diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 9ae9729fc05ee..46320355512d1 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -260,8 +260,6 @@ def read_parquet(path, engine: str = "auto", columns=None, **kwargs): """ Load a parquet object from the file path, returning a DataFrame. - .. versionadded:: 0.21.0 - Parameters ---------- path : str, path object or file-like object @@ -287,8 +285,6 @@ def read_parquet(path, engine: str = "auto", columns=None, **kwargs): 'pyarrow' is unavailable. columns : list, default=None If not None, only these columns will be read from the file. - - .. versionadded:: 0.21.1 **kwargs Any additional kwargs are passed to the engine. diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index 4e731b8ecca11..6faebf56a11ab 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -43,7 +43,6 @@ def to_pickle( HIGHEST_PROTOCOL. .. [1] https://docs.python.org/3/library/pickle.html - .. versionadded:: 0.21.0 See Also -------- diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 8c213803170a3..3e4b25088e094 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -306,9 +306,6 @@ def read_hdf( By file-like object, we refer to objects with a ``read()`` method, such as a file handler (e.g. via builtin ``open`` function) or ``StringIO``. - - .. versionadded:: 0.21.0 support for __fspath__ protocol. - key : object, optional The group identifier in the store. Can be omitted if the HDF file contains a single pandas object. @@ -1462,8 +1459,6 @@ def info(self) -> str: """ Print detailed information on the store. - .. versionadded:: 0.21.0 - Returns ------- str
xref #29126
https://api.github.com/repos/pandas-dev/pandas/pulls/33301
2020-04-05T13:26:44Z
2020-04-05T19:21:30Z
2020-04-05T19:21:30Z
2020-04-13T14:41:51Z
BUG: DataFrame._item_cache not cleared on on .copy()
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index d283d4450e6bf..7cb7db27ae603 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -364,6 +364,7 @@ Indexing - Bug in :class:`Index` constructor where an unhelpful error message was raised for ``numpy`` scalars (:issue:`33017`) - Bug in :meth:`DataFrame.lookup` incorrectly raising an ``AttributeError`` when ``frame.index`` or ``frame.columns`` is not unique; this will now raise a ``ValueError`` with a helpful error message (:issue:`33041`) - Bug in :meth:`DataFrame.iloc.__setitem__` creating a new array instead of overwriting ``Categorical`` values in-place (:issue:`32831`) +- Bug in :meth:`DataFrame.copy` _item_cache not invalidated after copy causes post-copy value updates to not be reflected (:issue:`31784`) Missing ^^^^^^^ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 9640c1e087f47..82cc45ee16c00 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5665,6 +5665,7 @@ def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries: dtype: object """ data = self._data.copy(deep=deep) + self._clear_item_cache() return self._constructor(data).__finalize__(self) def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries: diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 91627b46c2fee..4149485be181d 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -540,3 +540,21 @@ def test_attrs(self): result = df.rename(columns=str) assert result.attrs == {"version": 1} + + def test_cache_on_copy(self): + # GH 31784 _item_cache not cleared on copy causes incorrect reads after updates + df = DataFrame({"a": [1]}) + + df["x"] = [0] + df["a"] + + df.copy() + + df["a"].values[0] = -1 + + tm.assert_frame_equal(df, DataFrame({"a": [-1], "x": [0]})) + + df["y"] = [0] + + assert df["a"].values[0] == -1 + tm.assert_frame_equal(df, DataFrame({"a": [-1], "x": 
[0], "y": [0]}))
- [x] closes #31784 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33299
2020-04-05T05:26:53Z
2020-04-06T14:33:38Z
2020-04-06T14:33:38Z
2020-04-07T19:52:35Z
CLN: assorted cleanups
diff --git a/pandas/_libs/tslibs/timedeltas.pxd b/pandas/_libs/tslibs/timedeltas.pxd index b08592755f2ee..d7af7636df753 100644 --- a/pandas/_libs/tslibs/timedeltas.pxd +++ b/pandas/_libs/tslibs/timedeltas.pxd @@ -1,6 +1,6 @@ from numpy cimport int64_t # Exposed for tslib, not intended for outside use. -cdef int64_t cast_from_unit(object ts, object unit) except? -1 +cdef int64_t cast_from_unit(object ts, str unit) except? -1 cpdef int64_t delta_to_nanoseconds(delta) except? -1 cdef convert_to_timedelta64(object ts, object unit) diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index f2b77f3517a25..3af2279e2440f 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -257,10 +257,15 @@ def array_to_timedelta64(object[:] values, unit='ns', errors='raise'): return iresult.base # .base to access underlying np.ndarray -cpdef inline object precision_from_unit(object unit): +cpdef inline object precision_from_unit(str unit): """ Return a casting of the unit represented to nanoseconds + the precision to round the fractional part. + + Notes + ----- + The caller is responsible for ensuring that the default value of "ns" + takes the place of None. """ cdef: int64_t m @@ -301,7 +306,7 @@ cpdef inline object precision_from_unit(object unit): return m, p -cdef inline int64_t cast_from_unit(object ts, object unit) except? -1: +cdef inline int64_t cast_from_unit(object ts, str unit) except? -1: """ return a casting of the unit represented to nanoseconds round the fractional part of a float to our precision, p """ cdef: @@ -525,15 +530,24 @@ cdef inline timedelta_from_spec(object number, object frac, object unit): return cast_from_unit(float(n), unit) -cpdef inline object parse_timedelta_unit(object unit): +cpdef inline str parse_timedelta_unit(object unit): """ Parameters ---------- - unit : an unit string + unit : str or None + + Returns + ------- + str + Canonical unit string. 
+ + Raises + ------ + ValueError : on non-parseable input """ if unit is None: - return 'ns' - elif unit == 'M': + return "ns" + elif unit == "M": return unit try: return timedelta_abbrevs[unit.lower()] @@ -622,14 +636,14 @@ def _binary_op_method_timedeltalike(op, name): # ---------------------------------------------------------------------- # Timedelta Construction -cdef inline int64_t parse_iso_format_string(object ts) except? -1: +cdef inline int64_t parse_iso_format_string(str ts) except? -1: """ Extracts and cleanses the appropriate values from a match object with groups for each component of an ISO 8601 duration Parameters ---------- - ts: + ts: str ISO 8601 Duration formatted string Returns diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 3363d22686f96..9a49b9de2b5ef 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4500,6 +4500,8 @@ def _reindex_with_indexers( allow_dups=allow_dups, copy=copy, ) + # If we've made a copy once, no need to make another one + copy = False if copy and new_data is self._mgr: new_data = new_data.copy() @@ -6459,7 +6461,6 @@ def replace( ): if not ( is_scalar(to_replace) - or isinstance(to_replace, pd.Series) or is_re_compilable(to_replace) or is_list_like(to_replace) ): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index d0319e9181bad..df58593bc930c 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1,3 +1,4 @@ +from copy import copy as copy_func from datetime import datetime import operator from textwrap import dedent @@ -5313,7 +5314,7 @@ def _add_numeric_methods_unary(cls): Add in numeric unary methods. 
""" - def _make_evaluate_unary(op, opstr): + def _make_evaluate_unary(op, opstr: str_t): def _evaluate_numeric_unary(self): attrs = self._get_attributes_dict() @@ -5419,7 +5420,7 @@ def _add_logical_methods(cls): """ ) - def _make_logical_function(name, desc, f): + def _make_logical_function(name: str_t, desc: str_t, f): @Substitution(outname=name, desc=desc) @Appender(_index_shared_docs["index_" + name]) @Appender(_doc) @@ -5508,15 +5509,15 @@ def ensure_index_from_sequences(sequences, names=None): return MultiIndex.from_arrays(sequences, names=names) -def ensure_index(index_like, copy=False): +def ensure_index(index_like, copy: bool = False): """ Ensure that we have an index from some index-like object. Parameters ---------- - index : sequence + index_like : sequence An Index or other sequence - copy : bool + copy : bool, default False Returns ------- @@ -5567,9 +5568,7 @@ def ensure_index(index_like, copy=False): # clean_index_list does the equivalent of copying # so only need to do this if not list instance if copy: - from copy import copy - - index_like = copy(index_like) + index_like = copy_func(index_like) return Index(index_like) @@ -5596,7 +5595,7 @@ def _trim_front(strings): return trimmed -def _validate_join_method(method): +def _validate_join_method(method: str): if method not in ["left", "right", "inner", "outer"]: raise ValueError(f"do not recognize join method {method}") diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 06cd62c61b366..22a44d65a947a 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -625,7 +625,7 @@ def _ensure_listlike_indexer(self, key, axis=None): Parameters ---------- - key : _LocIndexer key or list-like of column labels + key : list-like of column labels Target labels. 
axis : key axis if known """ @@ -636,7 +636,7 @@ def _ensure_listlike_indexer(self, key, axis=None): return if isinstance(key, tuple): - # key may be a tuple if key is a _LocIndexer key + # key may be a tuple if we are .loc # in that case, set key to the column part of key key = key[column_axis] axis = column_axis @@ -649,9 +649,7 @@ def _ensure_listlike_indexer(self, key, axis=None): and all(is_hashable(k) for k in key) ): for k in key: - try: - self.obj[k] - except KeyError: + if k not in self.obj: self.obj[k] = np.nan def __setitem__(self, key, value): diff --git a/pandas/core/series.py b/pandas/core/series.py index 5ed8241101925..b74e80642dcdb 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -415,7 +415,7 @@ def _set_axis(self, axis: int, labels, fastpath: bool = False) -> None: object.__setattr__(self, "_index", labels) if not fastpath: - # The ensure_index call aabove ensures we have an Index object + # The ensure_index call above ensures we have an Index object self._mgr.set_axis(axis, labels) # ndarray compatibility
Mostly annotations
https://api.github.com/repos/pandas-dev/pandas/pulls/33297
2020-04-04T23:59:58Z
2020-04-07T23:08:25Z
2020-04-07T23:08:24Z
2020-04-07T23:30:08Z
BUG: Timestamp+- ndarray[td64]
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 8bff34dbdadad..2c417d89a40b0 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -304,6 +304,7 @@ Datetimelike - :class:`Timestamp` raising confusing error message when year, month or day is missing (:issue:`31200`) - Bug in :class:`DatetimeIndex` constructor incorrectly accepting ``bool``-dtyped inputs (:issue:`32668`) - Bug in :meth:`DatetimeIndex.searchsorted` not accepting a ``list`` or :class:`Series` as its argument (:issue:`32762`) +- Bug in :class:`Timestamp` arithmetic when adding or subtracting a ``np.ndarray`` with ``timedelta64`` dtype (:issue:`33296`) Timedelta ^^^^^^^^^ diff --git a/pandas/_libs/tslibs/c_timestamp.pyx b/pandas/_libs/tslibs/c_timestamp.pyx index 3c30460a74ece..04fadf220388f 100644 --- a/pandas/_libs/tslibs/c_timestamp.pyx +++ b/pandas/_libs/tslibs/c_timestamp.pyx @@ -253,6 +253,13 @@ cdef class _Timestamp(datetime): elif is_array(other): if other.dtype.kind in ['i', 'u']: raise integer_op_not_supported(self) + if other.dtype.kind == "m": + if self.tz is None: + return self.asm8 + other + return np.asarray( + [self + other[n] for n in range(len(other))], + dtype=object, + ) # index/series like elif hasattr(other, '_typ'): @@ -275,6 +282,13 @@ cdef class _Timestamp(datetime): elif is_array(other): if other.dtype.kind in ['i', 'u']: raise integer_op_not_supported(self) + if other.dtype.kind == "m": + if self.tz is None: + return self.asm8 - other + return np.asarray( + [self - other[n] for n in range(len(other))], + dtype=object, + ) typ = getattr(other, '_typ', None) if typ is not None: diff --git a/pandas/tests/scalar/timestamp/test_arithmetic.py b/pandas/tests/scalar/timestamp/test_arithmetic.py index ee70d1d0432fc..b038ee1aee106 100644 --- a/pandas/tests/scalar/timestamp/test_arithmetic.py +++ b/pandas/tests/scalar/timestamp/test_arithmetic.py @@ -6,6 +6,7 @@ from pandas.errors import OutOfBoundsDatetime from pandas 
import Timedelta, Timestamp +import pandas._testing as tm from pandas.tseries import offsets from pandas.tseries.frequencies import to_offset @@ -177,29 +178,6 @@ def test_timestamp_add_timedelta64_unit(self, other, expected_difference): valdiff = result.value - ts.value assert valdiff == expected_difference - @pytest.mark.parametrize("ts", [Timestamp.now(), Timestamp.now("utc")]) - @pytest.mark.parametrize( - "other", - [ - 1, - np.int64(1), - np.array([1, 2], dtype=np.int32), - np.array([3, 4], dtype=np.uint64), - ], - ) - def test_add_int_no_freq_raises(self, ts, other): - msg = "Addition/subtraction of integers and integer-arrays" - with pytest.raises(TypeError, match=msg): - ts + other - with pytest.raises(TypeError, match=msg): - other + ts - - with pytest.raises(TypeError, match=msg): - ts - other - msg = "unsupported operand type" - with pytest.raises(TypeError, match=msg): - other - ts - @pytest.mark.parametrize( "ts", [ @@ -229,3 +207,52 @@ def test_add_int_with_freq(self, ts, other): msg = "unsupported operand type" with pytest.raises(TypeError, match=msg): other - ts + + @pytest.mark.parametrize("shape", [(6,), (2, 3,)]) + def test_addsub_m8ndarray(self, shape): + # GH#33296 + ts = Timestamp("2020-04-04 15:45") + other = np.arange(6).astype("m8[h]").reshape(shape) + + result = ts + other + + ex_stamps = [ts + Timedelta(hours=n) for n in range(6)] + expected = np.array([x.asm8 for x in ex_stamps], dtype="M8[ns]").reshape(shape) + tm.assert_numpy_array_equal(result, expected) + + result = other + ts + tm.assert_numpy_array_equal(result, expected) + + result = ts - other + ex_stamps = [ts - Timedelta(hours=n) for n in range(6)] + expected = np.array([x.asm8 for x in ex_stamps], dtype="M8[ns]").reshape(shape) + tm.assert_numpy_array_equal(result, expected) + + msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timestamp'" + with pytest.raises(TypeError, match=msg): + other - ts + + @pytest.mark.parametrize("shape", [(6,), (2, 3,)]) + def 
test_addsub_m8ndarray_tzaware(self, shape): + # GH#33296 + ts = Timestamp("2020-04-04 15:45", tz="US/Pacific") + + other = np.arange(6).astype("m8[h]").reshape(shape) + + result = ts + other + + ex_stamps = [ts + Timedelta(hours=n) for n in range(6)] + expected = np.array(ex_stamps).reshape(shape) + tm.assert_numpy_array_equal(result, expected) + + result = other + ts + tm.assert_numpy_array_equal(result, expected) + + result = ts - other + ex_stamps = [ts - Timedelta(hours=n) for n in range(6)] + expected = np.array(ex_stamps).reshape(shape) + tm.assert_numpy_array_equal(result, expected) + + msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timestamp'" + with pytest.raises(TypeError, match=msg): + other - ts
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Noticed a now-duplicate test that this gets rid of
https://api.github.com/repos/pandas-dev/pandas/pulls/33296
2020-04-04T22:55:57Z
2020-04-06T22:16:28Z
2020-04-06T22:16:28Z
2020-04-06T22:35:25Z
REF: make kwargs explicit in BlockManager methods
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index bd90325114ee1..d8b54fd5cffb3 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1282,7 +1282,7 @@ def diff(self, n: int, axis: int = 1) -> List["Block"]: new_values = _block_shape(new_values, ndim=self.ndim) return [self.make_block(values=new_values)] - def shift(self, periods, axis: int = 0, fill_value=None): + def shift(self, periods: int, axis: int = 0, fill_value=None): """ shift the block by periods, possibly upcast """ # convert integer to float if necessary. need to do a lot more than # that, handle boolean etc also diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 2f1206e800d9b..fa8799512ed05 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -545,14 +545,24 @@ def get_axe(block, qs, axes): def isna(self, func) -> "BlockManager": return self.apply("apply", func=func) - def where(self, **kwargs) -> "BlockManager": - if kwargs.pop("align", True): + def where( + self, other, cond, align: bool, errors: str, try_cast: bool, axis: int + ) -> "BlockManager": + if align: align_keys = ["other", "cond"] else: align_keys = ["cond"] - kwargs["other"] = extract_array(kwargs["other"], extract_numpy=True) + other = extract_array(other, extract_numpy=True) - return self.apply("where", align_keys=align_keys, **kwargs) + return self.apply( + "where", + align_keys=align_keys, + other=other, + cond=cond, + errors=errors, + try_cast=try_cast, + axis=axis, + ) def setitem(self, indexer, value) -> "BlockManager": return self.apply("setitem", indexer=indexer, value=value) @@ -584,11 +594,13 @@ def diff(self, n: int, axis: int) -> "BlockManager": def interpolate(self, **kwargs) -> "BlockManager": return self.apply("interpolate", **kwargs) - def shift(self, **kwargs) -> "BlockManager": - return self.apply("shift", **kwargs) + def shift(self, periods: int, axis: int, fill_value) -> 
"BlockManager": + return self.apply("shift", periods=periods, axis=axis, fill_value=fill_value) - def fillna(self, **kwargs) -> "BlockManager": - return self.apply("fillna", **kwargs) + def fillna(self, value, limit, inplace: bool, downcast) -> "BlockManager": + return self.apply( + "fillna", value=value, limit=limit, inplace=inplace, downcast=downcast + ) def downcast(self) -> "BlockManager": return self.apply("downcast") @@ -753,9 +765,7 @@ def combine(self, blocks: List[Block], copy: bool = True) -> "BlockManager": new_blocks = [] for b in blocks: b = b.copy(deep=copy) - b.mgr_locs = algos.take_1d( - inv_indexer, b.mgr_locs.as_array, axis=0, allow_fill=False - ) + b.mgr_locs = inv_indexer[b.mgr_locs.indexer] new_blocks.append(b) axes = list(self.axes)
https://api.github.com/repos/pandas-dev/pandas/pulls/33295
2020-04-04T20:36:12Z
2020-04-05T19:17:00Z
2020-04-05T19:17:00Z
2020-04-05T19:44:39Z
REF: BlockManager.combine -> _combine
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 2f1206e800d9b..59d3fbc306947 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -729,7 +729,7 @@ def get_bool_data(self, copy: bool = False) -> "BlockManager": Whether to copy the blocks """ self._consolidate_inplace() - return self.combine([b for b in self.blocks if b.is_bool], copy) + return self._combine([b for b in self.blocks if b.is_bool], copy) def get_numeric_data(self, copy: bool = False) -> "BlockManager": """ @@ -739,9 +739,9 @@ def get_numeric_data(self, copy: bool = False) -> "BlockManager": Whether to copy the blocks """ self._consolidate_inplace() - return self.combine([b for b in self.blocks if b.is_numeric], copy) + return self._combine([b for b in self.blocks if b.is_numeric], copy) - def combine(self, blocks: List[Block], copy: bool = True) -> "BlockManager": + def _combine(self, blocks: List[Block], copy: bool = True) -> "BlockManager": """ return a new manager with the blocks """ if len(blocks) == 0: return self.make_empty() @@ -896,7 +896,7 @@ def to_dict(self, copy: bool = True): for b in self.blocks: bd.setdefault(str(b.dtype), []).append(b) - return {dtype: self.combine(blocks, copy=copy) for dtype, blocks in bd.items()} + return {dtype: self._combine(blocks, copy=copy) for dtype, blocks in bd.items()} def fast_xs(self, loc: int) -> ArrayLike: """
https://api.github.com/repos/pandas-dev/pandas/pulls/33294
2020-04-04T20:24:10Z
2020-04-05T19:12:08Z
2020-04-05T19:12:08Z
2020-04-05T19:44:07Z
CLN: remove BlockManager.__contains__
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 2f1206e800d9b..08131a977bda3 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -781,9 +781,6 @@ def get_slice(self, slobj: slice, axis: int = 0) -> "BlockManager": bm = type(self)(new_blocks, new_axes, do_integrity_check=False) return bm - def __contains__(self, item) -> bool: - return item in self.items - @property def nblocks(self) -> int: return len(self.blocks) diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 657849874f091..57fbc9ab13f84 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -301,10 +301,6 @@ def test_duplicate_ref_loc_failure(self): mgr = BlockManager(blocks, axes) mgr.iget(1) - def test_contains(self, mgr): - assert "a" in mgr - assert "baz" not in mgr - def test_pickle(self, mgr): mgr2 = tm.round_trip_pickle(mgr)
https://api.github.com/repos/pandas-dev/pandas/pulls/33293
2020-04-04T20:21:36Z
2020-04-05T20:04:27Z
2020-04-05T20:04:27Z
2020-04-05T20:08:46Z
REGR: Fix bug when replacing categorical value with self
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 7cb7db27ae603..5bd45e100c8cd 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -294,6 +294,7 @@ Categorical - Bug when passing categorical data to :class:`Index` constructor along with ``dtype=object`` incorrectly returning a :class:`CategoricalIndex` instead of object-dtype :class:`Index` (:issue:`32167`) - Bug where :class:`Categorical` comparison operator ``__ne__`` would incorrectly evaluate to ``False`` when either element was missing (:issue:`32276`) - :meth:`Categorical.fillna` now accepts :class:`Categorical` ``other`` argument (:issue:`32420`) +- Bug where :meth:`Categorical.replace` would replace with ``NaN`` whenever the new value and replacement value were equal (:issue:`33288`) Datetimelike ^^^^^^^^^^^^ diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index ad82d68baa5b3..c9b8db28e0cf6 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2447,6 +2447,8 @@ def replace(self, to_replace, value, inplace: bool = False): # other cases, like if both to_replace and value are list-like or if # to_replace is a dict, are handled separately in NDFrame for replace_value, new_value in replace_dict.items(): + if new_value == replace_value: + continue if replace_value in cat.categories: if isna(new_value): cat.remove_categories(replace_value, inplace=True) diff --git a/pandas/tests/arrays/categorical/test_algos.py b/pandas/tests/arrays/categorical/test_algos.py index 10c454f7c479a..325fa476d70e6 100644 --- a/pandas/tests/arrays/categorical/test_algos.py +++ b/pandas/tests/arrays/categorical/test_algos.py @@ -64,6 +64,8 @@ def test_isin_cats(): [ ("b", "c", ["a", "c"], "Categorical.categories are different"), ("c", "d", ["a", "b"], None), + # https://github.com/pandas-dev/pandas/issues/33288 + ("a", "a", ["a", "b"], None), ("b", None, ["a", None], "Categorical.categories length are 
different"), ], )
- [ ] closes #33288 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33292
2020-04-04T20:04:59Z
2020-04-06T22:01:57Z
2020-04-06T22:01:57Z
2020-05-26T09:35:15Z
DEPR: Index.is_mixed
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 8bff34dbdadad..d283d4450e6bf 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -256,6 +256,7 @@ Deprecations - :meth:`DataFrame.to_dict` has deprecated accepting short names for ``orient`` in future versions (:issue:`32515`) - :meth:`Categorical.to_dense` is deprecated and will be removed in a future version, use ``np.asarray(cat)`` instead (:issue:`32639`) - The ``fastpath`` keyword in the ``SingleBlockManager`` constructor is deprecated and will be removed in a future version (:issue:`33092`) +- :meth:`Index.is_mixed` is deprecated and will be removed in a future version, check ``index.inferred_type`` directly instead (:issue:`32922`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 5fec68d257167..394224e6a843f 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1955,6 +1955,12 @@ def is_mixed(self) -> bool: >>> idx.is_mixed() False """ + warnings.warn( + "Index.is_mixed is deprecated and will be removed in a future version. 
" + "Check index.inferred_type directly instead.", + FutureWarning, + stacklevel=2, + ) return self.inferred_type in ["mixed"] def holds_integer(self) -> bool: @@ -3135,7 +3141,7 @@ def is_int(v): # convert the slice to an indexer here # if we are mixed and have integers - if is_positional and self.is_mixed(): + if is_positional: try: # Validate start & stop if start is not None: diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 35ee81229b716..0417208868314 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1160,6 +1160,12 @@ def test_intersection_difference(self, indices, sort): diff = indices.difference(indices, sort=sort) tm.assert_index_equal(inter, diff) + def test_is_mixed_deprecated(self): + # GH#32922 + index = self.create_index() + with tm.assert_produces_warning(FutureWarning): + index.is_mixed() + @pytest.mark.parametrize( "indices, expected", [
- [x] closes #32922 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33291
2020-04-04T20:01:59Z
2020-04-05T19:53:13Z
2020-04-05T19:53:13Z
2020-04-05T19:54:26Z
BUG: 2D indexing on DTA/TDA/PA
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index c0bbbebac7c33..4fabd8f558fee 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -550,10 +550,7 @@ def __getitem__(self, key): key = np.asarray(key, dtype=bool) key = check_array_indexer(self, key) - if key.all(): - key = slice(0, None, None) - else: - key = lib.maybe_booleans_to_slice(key.view(np.uint8)) + key = lib.maybe_booleans_to_slice(key.view(np.uint8)) elif isinstance(key, list) and len(key) == 1 and isinstance(key[0], slice): # see https://github.com/pandas-dev/pandas/issues/31299, need to allow # this for now (would otherwise raise in check_array_indexer) @@ -561,7 +558,7 @@ def __getitem__(self, key): else: key = check_array_indexer(self, key) - is_period = is_period_dtype(self) + is_period = is_period_dtype(self.dtype) if is_period: freq = self.freq else: @@ -577,11 +574,6 @@ def __getitem__(self, key): freq = self.freq result = getitem(key) - if result.ndim > 1: - # To support MPL which performs slicing with 2 dim - # even though it only has 1 dim by definition - return result - return self._simple_new(result, dtype=self.dtype, freq=freq) def __setitem__( diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index f38a4fb83c64f..c752990531b34 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -214,7 +214,10 @@ class ExtensionIndex(Index): def __getitem__(self, key): result = self._data[key] if isinstance(result, type(self._data)): - return type(self)(result, name=self.name) + if result.ndim == 1: + return type(self)(result, name=self.name) + # Unpack to ndarray for MPL compat + result = result._data # Includes cases where we get a 2D ndarray back for MPL compat deprecate_ndim_indexing(result) diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 83995ab26cb56..fe35344f46688 100644 --- 
a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -60,6 +60,12 @@ def timedelta_index(request): class SharedTests: index_cls: Type[Union[DatetimeIndex, PeriodIndex, TimedeltaIndex]] + @pytest.fixture + def arr1d(self): + data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9 + arr = self.array_cls(data, freq="D") + return arr + def test_compare_len1_raises(self): # make sure we raise when comparing with different lengths, specific # to the case where one has length-1, which numpy would broadcast @@ -204,6 +210,18 @@ def test_searchsorted(self): result = arr.searchsorted(pd.NaT) assert result == 0 + def test_getitem_2d(self, arr1d): + # 2d slicing on a 1D array + expected = type(arr1d)(arr1d._data[:, np.newaxis], dtype=arr1d.dtype) + result = arr1d[:, np.newaxis] + tm.assert_equal(result, expected) + + # Lookup on a 2D array + arr2d = expected + expected = type(arr2d)(arr2d._data[:3, 0], dtype=arr2d.dtype) + result = arr2d[:3, 0] + tm.assert_equal(result, expected) + def test_setitem(self): data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9 arr = self.array_cls(data, freq="D") @@ -265,6 +283,13 @@ class TestDatetimeArray(SharedTests): array_cls = DatetimeArray dtype = pd.Timestamp + @pytest.fixture + def arr1d(self, tz_naive_fixture): + tz = tz_naive_fixture + dti = pd.date_range("2016-01-01 01:01:00", periods=3, freq="H", tz=tz) + dta = dti._data + return dta + def test_round(self, tz_naive_fixture): # GH#24064 tz = tz_naive_fixture @@ -645,6 +670,10 @@ class TestPeriodArray(SharedTests): array_cls = PeriodArray dtype = pd.Period + @pytest.fixture + def arr1d(self, period_index): + return period_index._data + def test_from_pi(self, period_index): pi = period_index arr = PeriodArray(pi)
Broken off from #32997. The fixture this implements in the datetimelike tests can be used in a follow-up to clean up those tests a bit
https://api.github.com/repos/pandas-dev/pandas/pulls/33290
2020-04-04T19:23:36Z
2020-04-06T22:17:19Z
2020-04-06T22:17:19Z
2020-04-06T22:33:48Z
DOC: Improved doc for `Index.equals`
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 5fec68d257167..9a254a499ad82 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4193,15 +4193,64 @@ def putmask(self, mask, value): # coerces to object return self.astype(object).putmask(mask, value) - def equals(self, other) -> bool: + def equals(self, other: Any) -> bool: """ - Determine if two Index objects contain the same elements. + Determine if two Index object are equal. + + The things that are being compared are: + + * The elements inside the Index object. + * The order of the elements inside the Index object. + + Parameters + ---------- + other : Any + The other object to compare against. Returns ------- bool - True if "other" is an Index and it has the same elements as calling - index; False otherwise. + True if "other" is an Index and it has the same elements and order + as the calling index; False otherwise. + + Examples + -------- + >>> idx1 = pd.Index([1, 2, 3]) + >>> idx1 + Int64Index([1, 2, 3], dtype='int64') + >>> idx1.equals(pd.Index([1, 2, 3])) + True + + The elements inside are compared + + >>> idx2 = pd.Index(["1", "2", "3"]) + >>> idx2 + Index(['1', '2', '3'], dtype='object') + + >>> idx1.equals(idx2) + False + + The oreder is compared + + >>> ascending_idx = pd.Index([1, 2, 3]) + >>> ascending_idx + Int64Index([1, 2, 3], dtype='int64') + >>> descending_idx = pd.Index([3, 2, 1]) + >>> descending_idx + Int64Index([3, 2, 1], dtype='int64') + >>> ascending_idx.equals(descending_idx) + False + + The dtype is *not* compared + + >>> int64_idx = pd.Int64Index([1, 2, 3]) + >>> int64_idx + Int64Index([1, 2, 3], dtype='int64') + >>> uint64_idx = pd.UInt64Index([1, 2, 3]) + >>> uint64_idx + UInt64Index([1, 2, 3], dtype='uint64') + >>> int64_idx.equals(uint64_idx) + True """ if self.is_(other): return True
- [x] closes #33285 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33289
2020-04-04T19:13:10Z
2020-04-06T08:29:12Z
2020-04-06T08:29:11Z
2020-04-06T08:29:43Z
CLN: Add/refine type hints to some functions in core.dtypes.cast
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index da9646aa8c46f..6cd0948f9beb8 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -3,6 +3,7 @@ """ from datetime import date, datetime, timedelta +from typing import TYPE_CHECKING, Type import numpy as np @@ -63,6 +64,7 @@ ABCDataFrame, ABCDatetimeArray, ABCDatetimeIndex, + ABCExtensionArray, ABCPeriodArray, ABCPeriodIndex, ABCSeries, @@ -70,6 +72,10 @@ from pandas.core.dtypes.inference import is_list_like from pandas.core.dtypes.missing import isna, notna +if TYPE_CHECKING: + from pandas import Series + from pandas.core.arrays import ExtensionArray # noqa: F401 + _int8_max = np.iinfo(np.int8).max _int16_max = np.iinfo(np.int16).max _int32_max = np.iinfo(np.int32).max @@ -246,9 +252,7 @@ def trans(x): return result -def maybe_cast_result( - result, obj: ABCSeries, numeric_only: bool = False, how: str = "" -): +def maybe_cast_result(result, obj: "Series", numeric_only: bool = False, how: str = ""): """ Try casting result to a different type if appropriate @@ -256,8 +260,8 @@ def maybe_cast_result( ---------- result : array-like Result to cast. - obj : ABCSeries - Input series from which result was calculated. + obj : Series + Input Series from which result was calculated. numeric_only : bool, default False Whether to cast only numerics or datetimes as well. how : str, default "" @@ -313,13 +317,13 @@ def maybe_cast_result_dtype(dtype: DtypeObj, how: str) -> DtypeObj: return d.get((dtype, how), dtype) -def maybe_cast_to_extension_array(cls, obj, dtype=None): +def maybe_cast_to_extension_array(cls: Type["ExtensionArray"], obj, dtype=None): """ Call to `_from_sequence` that returns the object unchanged on Exception. 
Parameters ---------- - cls : ExtensionArray subclass + cls : class, subclass of ExtensionArray obj : arraylike Values to pass to cls._from_sequence dtype : ExtensionDtype, optional @@ -329,6 +333,8 @@ def maybe_cast_to_extension_array(cls, obj, dtype=None): ExtensionArray or obj """ assert isinstance(cls, type), f"must pass a type: {cls}" + assertion_msg = f"must pass a subclass of ExtensionArray: {cls}" + assert issubclass(cls, ABCExtensionArray), assertion_msg try: result = cls._from_sequence(obj, dtype=dtype) except Exception: diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 093c925acbc49..88580f6ebb3ed 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -151,7 +151,7 @@ def pinner(cls): @pin_whitelisted_properties(Series, base.series_apply_whitelist) -class SeriesGroupBy(GroupBy): +class SeriesGroupBy(GroupBy[Series]): _apply_whitelist = base.series_apply_whitelist def _iterate_slices(self) -> Iterable[Series]: @@ -815,7 +815,7 @@ def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None): @pin_whitelisted_properties(DataFrame, base.dataframe_apply_whitelist) -class DataFrameGroupBy(GroupBy): +class DataFrameGroupBy(GroupBy[DataFrame]): _apply_whitelist = base.dataframe_apply_whitelist @@ -1462,7 +1462,7 @@ def _transform_fast(self, result: DataFrame, func_nm: str) -> DataFrame: for i, _ in enumerate(result.columns): res = algorithms.take_1d(result.iloc[:, i].values, ids) # TODO: we have no test cases that get here with EA dtypes; - # try_cast may not be needed if EAs never get here + # maybe_cast_result may not be needed if EAs never get here if cast: res = maybe_cast_result(res, obj.iloc[:, i], how=func_nm) output.append(res) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index ebdb0062491be..4d19f6a79ff87 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -17,6 +17,7 @@ class providing the base-class of operations. 
Callable, Dict, FrozenSet, + Generic, Hashable, Iterable, List, @@ -24,6 +25,7 @@ class providing the base-class of operations. Optional, Tuple, Type, + TypeVar, Union, ) @@ -353,13 +355,13 @@ def _group_selection_context(groupby): ] -class _GroupBy(PandasObject, SelectionMixin): +class _GroupBy(PandasObject, SelectionMixin, Generic[FrameOrSeries]): _group_selection = None _apply_whitelist: FrozenSet[str] = frozenset() def __init__( self, - obj: NDFrame, + obj: FrameOrSeries, keys: Optional[_KeysArgType] = None, axis: int = 0, level=None, @@ -995,7 +997,11 @@ def _apply_filter(self, indices, dropna): return filtered -class GroupBy(_GroupBy): +# To track operations that expand dimensions, like ohlc +OutputFrameOrSeries = TypeVar("OutputFrameOrSeries", bound=NDFrame) + + +class GroupBy(_GroupBy[FrameOrSeries]): """ Class for grouping and aggregating relational data. @@ -2420,8 +2426,8 @@ def tail(self, n=5): return self._selected_obj[mask] def _reindex_output( - self, output: FrameOrSeries, fill_value: Scalar = np.NaN - ) -> FrameOrSeries: + self, output: OutputFrameOrSeries, fill_value: Scalar = np.NaN + ) -> OutputFrameOrSeries: """ If we have categorical groupers, then we might want to make sure that we have a fully re-indexed output to the levels. This means expanding diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 742de397956c0..8d535374a083f 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -682,7 +682,7 @@ def _aggregate_series_pure_python(self, obj: Series, func): assert result is not None result = lib.maybe_convert_objects(result, try_float=0) - # TODO: try_cast back to EA? + # TODO: maybe_cast_to_extension_array? return result, counts
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Some cleanup resulting from PR #32894. In order to get SeriesGroupBy._transform_fast to realize self.obj is a Series, it seemed I needed to add a generic type to GroupBy and _GroupBy. I don't know if there is a better way to do this. Here, one cannot use FrameOrSeries as a Series may return a DataFrame and vice-versa, so I added _GroupByT.
https://api.github.com/repos/pandas-dev/pandas/pulls/33286
2020-04-04T16:18:53Z
2020-04-05T19:39:56Z
2020-04-05T19:39:56Z
2020-07-11T16:02:14Z
BUG: fix boolean array skipna=False for .any() and .all()
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 5c39377899a20..83989002c1e89 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -452,7 +452,7 @@ Missing ^^^^^^^ - Calling :meth:`fillna` on an empty Series now correctly returns a shallow copied object. The behaviour is now consistent with :class:`Index`, :class:`DataFrame` and a non-empty :class:`Series` (:issue:`32543`). - +- Bug in :meth:`~Series.any` and :meth:`~Series.all` incorrectly returning ``<NA>`` for all ``False`` or all ``True`` values using the nulllable boolean dtype and with ``skipna=False`` (:issue:`33253`) MultiIndex ^^^^^^^^^^ diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py index e85534def6b97..7ffbd0d595565 100644 --- a/pandas/core/arrays/boolean.py +++ b/pandas/core/arrays/boolean.py @@ -520,7 +520,7 @@ def any(self, skipna: bool = True, **kwargs): if skipna: return result else: - if result or len(self) == 0: + if result or len(self) == 0 or not self._mask.any(): return result else: return self.dtype.na_value @@ -587,7 +587,7 @@ def all(self, skipna: bool = True, **kwargs): if skipna: return result else: - if not result or len(self) == 0: + if not result or len(self) == 0 or not self._mask.any(): return result else: return self.dtype.na_value diff --git a/pandas/tests/arrays/boolean/test_reduction.py b/pandas/tests/arrays/boolean/test_reduction.py index ce50266c756a8..5dd5620162a8a 100644 --- a/pandas/tests/arrays/boolean/test_reduction.py +++ b/pandas/tests/arrays/boolean/test_reduction.py @@ -19,6 +19,9 @@ def data(): ([False, pd.NA], False, False, pd.NA, False), ([pd.NA], False, True, pd.NA, pd.NA), ([], False, True, False, True), + # GH-33253: all True / all False values buggy with skipna=False + ([True, True], True, True, True, True), + ([False, False], False, False, False, False), ], ) def test_any_all(values, exp_any, exp_all, exp_any_noskip, exp_all_noskip): diff --git 
a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py index 8fb035e085d40..fa62d5d8c4983 100644 --- a/pandas/tests/reductions/test_reductions.py +++ b/pandas/tests/reductions/test_reductions.py @@ -891,6 +891,30 @@ def test_all_any_params(self): with pytest.raises(NotImplementedError): s.all(bool_only=True) + def test_all_any_boolean(self): + # Check skipna, with boolean type + s1 = Series([pd.NA, True], dtype="boolean") + s2 = Series([pd.NA, False], dtype="boolean") + assert s1.all(skipna=False) is pd.NA # NA && True => NA + assert s1.all(skipna=True) + assert s2.any(skipna=False) is pd.NA # NA || False => NA + assert not s2.any(skipna=True) + + # GH-33253: all True / all False values buggy with skipna=False + s3 = Series([True, True], dtype="boolean") + s4 = Series([False, False], dtype="boolean") + assert s3.all(skipna=False) + assert not s4.any(skipna=False) + + # Check level TODO(GH-33449) result should also be boolean + s = pd.Series( + [False, False, True, True, False, True], + index=[0, 0, 1, 1, 2, 2], + dtype="boolean", + ) + tm.assert_series_equal(s.all(level=0), Series([False, True, False])) + tm.assert_series_equal(s.any(level=0), Series([False, True, True])) + def test_timedelta64_analytics(self): # index min/max
- [x] closes #33253 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry I added "not self._mask.any()" to check if there is no missing value for both .any() and .all() when skipna is False. It looks fine for the example you provided. ![any_all](https://user-images.githubusercontent.com/43714531/78453697-65cc1400-7661-11ea-9666-2fef60f2e81e.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/33284
2020-04-04T14:36:37Z
2020-04-10T08:37:17Z
2020-04-10T08:37:16Z
2020-04-10T08:37:27Z
DOC: Fix EX01 in DataFrame.drop_duplicates
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 71b755bbf9665..1f73efed82cb9 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4673,6 +4673,47 @@ def drop_duplicates( See Also -------- DataFrame.value_counts: Count unique combinations of columns. + + Examples + -------- + Consider dataset containing ramen rating. + + >>> df = pd.DataFrame({ + ... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'], + ... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'], + ... 'rating': [4, 4, 3.5, 15, 5] + ... }) + >>> df + brand style rating + 0 Yum Yum cup 4.0 + 1 Yum Yum cup 4.0 + 2 Indomie cup 3.5 + 3 Indomie pack 15.0 + 4 Indomie pack 5.0 + + By default, it removes duplicate rows based on all columns. + + >>> df.drop_duplicates() + brand style rating + 0 Yum Yum cup 4.0 + 2 Indomie cup 3.5 + 3 Indomie pack 15.0 + 4 Indomie pack 5.0 + + To remove duplicates on specific column(s), use ``subset``. + + >>> df.drop_duplicates(subset=['brand']) + brand style rating + 0 Yum Yum cup 4.0 + 2 Indomie cup 3.5 + + To remove duplicates and keep last occurences, use ``keep``. + + >>> df.drop_duplicates(subset=['brand', 'style'], keep='last') + brand style rating + 1 Yum Yum cup 4.0 + 2 Indomie cup 3.5 + 4 Indomie pack 5.0 """ if self.empty: return self.copy()
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Related to #27977. ``` ################################################################################ ################################## Validation ################################## ################################################################################
https://api.github.com/repos/pandas-dev/pandas/pulls/33283
2020-04-04T12:15:30Z
2020-04-10T17:51:59Z
2020-04-10T17:51:59Z
2020-04-10T17:52:03Z
DOC: Fix error in Series.clip and DataFrame.clip
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 0ecfbce460b3a..c202bf846047f 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7299,6 +7299,12 @@ def clip( Same type as calling object with the values outside the clip boundaries replaced. + See Also + -------- + Series.clip : Trim values at input threshold in series. + DataFrame.clip : Trim values at input threshold in dataframe. + numpy.clip : Clip (limit) the values in an array. + Examples -------- >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Related to #27977. output of `python scripts/validate_docstrings.py pandas.Series.clip and pandas.DataFrame.clip`: ``` ################################################################################ ################################## Validation ################################## ################################################################################ ```
https://api.github.com/repos/pandas-dev/pandas/pulls/33282
2020-04-04T10:13:53Z
2020-04-04T21:23:03Z
2020-04-04T21:23:03Z
2020-04-04T21:23:10Z
ENH: Optimize nrows in read_excel
diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py index 80af2cff41769..1eaccb9f2d897 100644 --- a/asv_bench/benchmarks/io/excel.py +++ b/asv_bench/benchmarks/io/excel.py @@ -11,7 +11,7 @@ def _generate_dataframe(): - N = 2000 + N = 20000 C = 5 df = DataFrame( np.random.randn(N, C), @@ -69,5 +69,9 @@ def time_read_excel(self, engine): fname = self.fname_odf if engine == "odf" else self.fname_excel read_excel(fname, engine=engine) + def time_read_excel_nrows(self, engine): + fname = self.fname_odf if engine == "odf" else self.fname_excel + read_excel(fname, engine=engine, nrows=1) + from ..pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 2243790a663df..e513ae17704b9 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -775,6 +775,7 @@ Performance improvements - Performance improvement in :class:`pandas.core.groupby.RollingGroupby` (:issue:`34052`) - Performance improvement in arithmetic operations (sub, add, mul, div) for MultiIndex (:issue:`34297`) - Performance improvement in `DataFrame[bool_indexer]` when `bool_indexer` is a list (:issue:`33924`) +- Performance improvement in `read_excel` for integer ``header``, ``skiprows``, and ``nrows`` (:issue:`33281`). .. 
--------------------------------------------------------------------------- diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 6c3b49b9afc68..2216ad7eb6364 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -3,10 +3,12 @@ from io import BytesIO import os from textwrap import fill +from typing import List, Optional, Sequence from pandas._config import config from pandas._libs.parsers import STR_NA_VALUES +from pandas._typing import Scalar, Union from pandas.errors import EmptyDataError from pandas.util._decorators import Appender, deprecate_nonkeyword_arguments @@ -383,9 +385,46 @@ def get_sheet_by_index(self, index): pass @abc.abstractmethod - def get_sheet_data(self, sheet, convert_float): + def get_sheet_data( + self, + sheet, + convert_float: bool, + header: Optional[Union[int, Sequence[int]]], + skiprows: Optional[Union[int, Sequence[int]]], + nrows: Optional[int], + ) -> List[List[Scalar]]: pass + def should_skip_row( + self, + index: int, + header: Optional[Union[int, Sequence[int]]], + skiprows: Optional[Union[int, Sequence[int]]], + nrows: Optional[int], + ) -> bool: + """ + Determines whether row should be read. + + Parameters + ---------- + index : int + Index of row. + header : int, list of int + Rows used as column labels. + skiprows : int, list of int + Rows to skip at the begining. + nrows : int + Number of rows to parse. + + Returns + ------- + Bool determining if row should be skipped. 
+ """ + if nrows is not None and isinstance(header, int) and isinstance(skiprows, int): + if index < header + skiprows - 1: + return True + return False + def parse( self, sheet_name=0, @@ -439,7 +478,16 @@ def parse( else: # assume an integer if not a string sheet = self.get_sheet_by_index(asheetname) - data = self.get_sheet_data(sheet, convert_float) + gsd_header = 0 if header is None else header + gsd_skiprows = 0 if skiprows is None else skiprows + gsd_nrows = nrows if isinstance(nrows, int) else None + + if isinstance(gsd_header, list) or isinstance(gsd_skiprows, list): + gsd_nrows = None + + data = self.get_sheet_data( + sheet, convert_float, gsd_header, gsd_skiprows, gsd_nrows + ) usecols = _maybe_convert_usecols(usecols) if not data: diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py index 739c77d1c0b99..e30562ca23c3c 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -1,6 +1,6 @@ -from typing import List +from typing import List, Optional, Sequence -from pandas._typing import FilePathOrBuffer, Scalar +from pandas._typing import FilePathOrBuffer, Scalar, Union from pandas.compat._optional import import_optional_dependency import pandas as pd @@ -63,7 +63,14 @@ def get_sheet_by_name(self, name: str): raise ValueError(f"sheet {name} not found") - def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]: + def get_sheet_data( + self, + sheet, + convert_float: bool, + header: Optional[Union[int, Sequence[int]]], + skiprows: Optional[Union[int, Sequence[int]]], + nrows: Optional[int], + ) -> List[List[Scalar]]: """ Parse an ODF Table into a list of lists """ @@ -79,7 +86,15 @@ def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]: table: List[List[Scalar]] = [] + if nrows is not None and isinstance(header, int) and isinstance(skiprows, int): + sheet_rows = sheet_rows[0 : header + skiprows + nrows + 1] + for i, sheet_row in enumerate(sheet_rows): + + if 
self.should_skip_row(i, header, skiprows, nrows): + table.append([]) + continue + sheet_cells = [x for x in sheet_row.childNodes if x.qname in cell_names] empty_cells = 0 table_row: List[Scalar] = [] diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index 0696d82e51f34..4cd9e07e1be12 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -1,8 +1,8 @@ -from typing import List +from typing import List, Optional, Sequence import numpy as np -from pandas._typing import FilePathOrBuffer, Scalar +from pandas._typing import FilePathOrBuffer, Scalar, Union from pandas.compat._optional import import_optional_dependency from pandas.io.excel._base import ExcelWriter, _BaseExcelReader @@ -529,9 +529,18 @@ def _convert_cell(self, cell, convert_float: bool) -> Scalar: return cell.value - def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]: + def get_sheet_data( + self, + sheet, + convert_float: bool, + header: Optional[Union[int, Sequence[int]]], + skiprows: Optional[Union[int, Sequence[int]]], + nrows: Optional[int], + ) -> List[List[Scalar]]: data: List[List[Scalar]] = [] + for row in sheet.rows: + data.append([self._convert_cell(cell, convert_float) for cell in row]) return data diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py index 0d96c8c4acdb8..a7962572775b5 100644 --- a/pandas/io/excel/_pyxlsb.py +++ b/pandas/io/excel/_pyxlsb.py @@ -1,6 +1,6 @@ -from typing import List +from typing import List, Optional, Sequence -from pandas._typing import FilePathOrBuffer, Scalar +from pandas._typing import FilePathOrBuffer, Scalar, Union from pandas.compat._optional import import_optional_dependency from pandas.io.excel._base import _BaseExcelReader @@ -62,7 +62,14 @@ def _convert_cell(self, cell, convert_float: bool) -> Scalar: return cell.v - def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]: + def get_sheet_data( + self, + sheet, + convert_float: bool, + header: 
Optional[Union[int, Sequence[int]]], + skiprows: Optional[Union[int, Sequence[int]]], + nrows: Optional[int], + ) -> List[List[Scalar]]: return [ [self._convert_cell(c, convert_float) for c in r] for r in sheet.rows(sparse=False) diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py index 8f7d3b1368fc7..61040c4158240 100644 --- a/pandas/io/excel/_xlrd.py +++ b/pandas/io/excel/_xlrd.py @@ -1,7 +1,9 @@ from datetime import time +from typing import List, Optional, Sequence import numpy as np +from pandas._typing import Scalar, Union from pandas.compat._optional import import_optional_dependency from pandas.io.excel._base import _BaseExcelReader @@ -46,7 +48,14 @@ def get_sheet_by_name(self, name): def get_sheet_by_index(self, index): return self.book.sheet_by_index(index) - def get_sheet_data(self, sheet, convert_float): + def get_sheet_data( + self, + sheet, + convert_float: bool, + header: Optional[Union[int, Sequence[int]]], + skiprows: Optional[Union[int, Sequence[int]]], + nrows: Optional[int], + ) -> List[List[Scalar]]: from xlrd import ( xldate, XL_CELL_DATE, @@ -95,9 +104,18 @@ def _parse_cell(cell_contents, cell_typ): cell_contents = val return cell_contents - data = [] + data: List[List[Scalar]] = [] + + sheet_nrows = sheet.nrows + if nrows is not None and isinstance(header, int) and isinstance(skiprows, int): + sheet_nrows = min(header + skiprows + nrows + 1, sheet_nrows) + + for i in range(sheet_nrows): + + if self.should_skip_row(i, header, skiprows, nrows): + data.append([]) + continue - for i in range(sheet.nrows): row = [ _parse_cell(value, typ) for value, typ in zip(sheet.row_values(i), sheet.row_types(i)) diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index 955db982f8300..5e685decfa21d 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -1143,3 +1143,18 @@ def test_header_with_index_col(self, engine, filename): filename, sheet_name="Sheet1", 
index_col=0, header=[0, 1] ) tm.assert_frame_equal(expected, result) + + @pytest.mark.parametrize("header, skiprows", [(1, 2), (0, 3)]) + @td.check_file_leaks + def test_header_skiprows_nrows(self, engine, read_ext, header, skiprows): + # GH 32727 + data = pd.read_excel("test1" + read_ext, engine=engine) + expected = ( + DataFrame(data.iloc[3:6]) + .reset_index(drop=True) + .rename(columns=data.iloc[2].rename(None)) + ) + actual = pd.read_excel( + "test1" + read_ext, engine=engine, header=header, skiprows=skiprows, nrows=3 + ) + tm.assert_frame_equal(expected, actual)
- [x] closes #32727 - [x] tests passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] If header, skiprows and nrows are integers, rows that will be skipped are not loaded in get_sheet_data function.
https://api.github.com/repos/pandas-dev/pandas/pulls/33281
2020-04-04T01:10:43Z
2020-08-18T19:44:28Z
null
2020-08-18T19:44:29Z
Troubleshoot Travis
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index d9071a80b5db7..b74abc965f7fa 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -15,6 +15,16 @@ from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json import pandas._testing as tm +_seriesd = tm.getSeriesData() + +_frame = DataFrame(_seriesd) + +_cat_frame = _frame.copy() +cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15) +_cat_frame.index = pd.CategoricalIndex(cat, name="E") +_cat_frame["E"] = list(reversed(cat)) +_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64") + def assert_json_roundtrip_equal(result, expected, orient): if orient == "records" or orient == "values": @@ -26,6 +36,12 @@ def assert_json_roundtrip_equal(result, expected, orient): @pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning") class TestPandasContainer: + @pytest.fixture(autouse=True) + def setup(self): + self.categorical = _cat_frame.copy() + + yield + def test_frame_double_encoded_labels(self, orient): df = DataFrame( [["a", "b"], ["c", "d"]], @@ -167,21 +183,25 @@ def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype): @pytest.mark.parametrize("convert_axes", [True, False]) @pytest.mark.parametrize("numpy", [True, False]) def test_roundtrip_categorical(self, orient, convert_axes, numpy): - cats = ["a", "b"] - df = pd.DataFrame( - pd.Categorical(cats), index=pd.CategoricalIndex(cats), columns=["cat"] - ) + # TODO: create a better frame to test with and improve coverage + if orient in ("index", "columns"): + pytest.xfail(f"Can't have duplicate index values for orient '{orient}')") - data = df.to_json(orient=orient) - if numpy and orient != "split": + data = self.categorical.to_json(orient=orient) + if numpy and orient in ("records", "values"): pytest.xfail(f"Orient {orient} is broken with numpy=True") result = pd.read_json( data, orient=orient, 
convert_axes=convert_axes, numpy=numpy ) - # Categorical dtypes are not preserved on round trip - expected = pd.DataFrame(cats, index=cats, columns=["cat"]) + expected = self.categorical.copy() + expected.index = expected.index.astype(str) # Categorical not preserved + expected.index.name = None # index names aren't preserved in JSON + + if not numpy and orient == "index": + expected = expected.sort_index() + assert_json_roundtrip_equal(result, expected, orient) @pytest.mark.parametrize("convert_axes", [True, False])
Reverts #33228, which appears to be the first build that started failing on Travis.
https://api.github.com/repos/pandas-dev/pandas/pulls/33280
2020-04-04T00:59:49Z
2020-04-04T17:15:47Z
2020-04-04T17:15:47Z
2020-04-04T17:17:08Z
API/CLN: simplify CategoricalBlock.replace
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 2cc0bb07bd17f..e267a7a4e5bd5 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2761,18 +2761,9 @@ def replace( ): inplace = validate_bool_kwarg(inplace, "inplace") result = self if inplace else self.copy() - if filter is None: # replace was called on a series - result.values.replace(to_replace, value, inplace=True) - if convert: - return result.convert(numeric=False, copy=not inplace) - else: - return result - else: # replace was called on a DataFrame - if not isna(value): - result.values.add_categories(value, inplace=True) - return super(CategoricalBlock, result).replace( - to_replace, value, inplace, filter, regex, convert - ) + + result.values.replace(to_replace, value, inplace=True) + return result # ----------------------------------------------------------------- diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index ee89562261b19..a9fb686d5bc50 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1303,9 +1303,15 @@ def test_replace_method(self, to_replace, method, expected): def test_categorical_replace_with_dict(self, replace_dict, final_data): # GH 26988 df = DataFrame([[1, 1], [2, 2]], columns=["a", "b"], dtype="category") - expected = DataFrame(final_data, columns=["a", "b"], dtype="category") - expected["a"] = expected["a"].cat.set_categories([1, 2, 3]) - expected["b"] = expected["b"].cat.set_categories([1, 2, 3]) + + final_data = np.array(final_data) + + a = pd.Categorical(final_data[:, 0], categories=[3, 2]) + + excat = [3, 2] if replace_dict["b"] == 1 else [1, 3] + b = pd.Categorical(final_data[:, 1], categories=excat) + + expected = DataFrame({"a": a, "b": b}) result = df.replace(replace_dict, 3) tm.assert_frame_equal(result, expected) with pytest.raises(AssertionError):
- [x] closes #33272 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Makes DataFrame.replace on Categorical behave like Series.replace.
https://api.github.com/repos/pandas-dev/pandas/pulls/33279
2020-04-04T00:52:22Z
2020-04-06T21:16:03Z
2020-04-06T21:16:03Z
2020-04-06T21:26:20Z
ENH: Added schema kwarg to get_schema method
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index adc1806523d6e..868c9dc3ef3ca 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -54,7 +54,7 @@ For example: Other enhancements ^^^^^^^^^^^^^^^^^^ - :class:`Index` with object dtype supports division and multiplication (:issue:`34160`) -- +- :meth:`get_schema` will now allow a schema kwarg that will add a schema into the create table statement (:issue:`28486`) - .. _whatsnew_120.api_breaking.python: diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 51888e5021d80..4816112b1c087 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -7,7 +7,7 @@ from datetime import date, datetime, time from functools import partial import re -from typing import Iterator, Optional, Union, overload +from typing import Iterator, List, Optional, Union, overload import warnings import numpy as np @@ -1455,9 +1455,22 @@ def drop_table(self, table_name, schema=None): self.get_table(table_name, schema).drop() self.meta.clear() - def _create_sql_schema(self, frame, table_name, keys=None, dtype=None): + def _create_sql_schema( + self, + frame: DataFrame, + table_name: str, + keys: Optional[List[str]] = None, + dtype : Optional[dict] = None, + schema: Optional[str] = None + ): table = SQLTable( - table_name, self, frame=frame, index=False, keys=keys, dtype=dtype + table_name, + self, + frame=frame, + index=False, + keys=keys, + dtype=dtype, + schema=schema, ) return str(table.sql_schema()) @@ -1588,9 +1601,13 @@ def _create_table_setup(self): create_tbl_stmts.append( f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})" ) - + if self.schema: + schema_name = self.schema + "." 
+ else: + schema_name = "" create_stmts = [ "CREATE TABLE " + + schema_name + escape(self.name) + " (\n" + ",\n ".join(create_tbl_stmts) @@ -1845,14 +1862,20 @@ def drop_table(self, name, schema=None): drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}" self.execute(drop_sql) - def _create_sql_schema(self, frame, table_name, keys=None, dtype=None): + def _create_sql_schema(self, frame, table_name, keys=None, dtype=None, schema=None): table = SQLiteTable( - table_name, self, frame=frame, index=False, keys=keys, dtype=dtype + table_name, + self, + frame=frame, + index=False, + keys=keys, + dtype=dtype, + schema=schema, ) return str(table.sql_schema()) -def get_schema(frame, name, keys=None, con=None, dtype=None): +def get_schema(frame, name, keys=None, con=None, dtype=None, schema=None): """ Get the SQL db table schema for the given frame. @@ -1873,4 +1896,6 @@ def get_schema(frame, name, keys=None, con=None, dtype=None): """ pandas_sql = pandasSQL_builder(con=con) - return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype) + return pandas_sql._create_sql_schema( + frame, name, keys=keys, dtype=dtype, schema=schema + ) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index a7e3162ed7b73..a8b2b63ee65b5 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -866,6 +866,13 @@ def test_get_schema(self): create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn) assert "CREATE" in create_sql + # GH28486 + def test_get_schema_with_schema(self): + create_sql = sql.get_schema( + self.test_frame1, "test", con=self.conn, schema="pypi" + ) + assert "CREATE TABLE pypi." in create_sql + def test_get_schema_dtypes(self): float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]}) dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
- [x] closes #28486 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33278
2020-04-03T22:25:26Z
2020-11-29T04:42:41Z
null
2020-11-29T04:42:42Z
CLN 31942/replace appender with doc 3
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b522920ec9f23..ec98ade5c4b2f 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2183,9 +2183,10 @@ def to_feather(self, path, **kwargs) -> None: to_feather(self, path, **kwargs) - @Appender( - """ - Examples + @doc( + Series.to_markdown, + klass=_shared_doc_kwargs["klass"], + examples="""Examples -------- >>> df = pd.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} @@ -2206,10 +2207,8 @@ def to_feather(self, path, **kwargs) -> None: +----+------------+------------+ | 1 | pig | quetzal | +----+------------+------------+ - """ + """, ) - @Substitution(klass="DataFrame") - @Appender(_shared_docs["to_markdown"]) def to_markdown( self, buf: Optional[IO[str]] = None, mode: Optional[str] = None, **kwargs ) -> Optional[str]: @@ -4758,20 +4757,20 @@ def _maybe_casted_values(index, labels=None): # ---------------------------------------------------------------------- # Reindex-based selection methods - @Appender(_shared_docs["isna"] % _shared_doc_kwargs) + @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) def isna(self) -> "DataFrame": result = self._constructor(self._data.isna(func=isna)) return result.__finalize__(self, method="isna") - @Appender(_shared_docs["isna"] % _shared_doc_kwargs) + @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) def isnull(self) -> "DataFrame": return self.isna() - @Appender(_shared_docs["notna"] % _shared_doc_kwargs) + @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) def notna(self) -> "DataFrame": return ~self.isna() - @Appender(_shared_docs["notna"] % _shared_doc_kwargs) + @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) def notnull(self) -> "DataFrame": return ~self.isna() @@ -7330,13 +7329,14 @@ def _gotitem( """ ) - @Substitution( + @doc( + _shared_docs["aggregate"], + klass=_shared_doc_kwargs["klass"], + axis=_shared_doc_kwargs["axis"], see_also=_agg_summary_and_see_also_doc, examples=_agg_examples_doc, 
versionadded="\n.. versionadded:: 0.20.0\n", - **_shared_doc_kwargs, ) - @Appender(_shared_docs["aggregate"]) def aggregate(self, func, axis=0, *args, **kwargs): axis = self._get_axis_number(axis) @@ -7364,7 +7364,11 @@ def _aggregate(self, arg, axis=0, *args, **kwargs): agg = aggregate - @Appender(_shared_docs["transform"] % _shared_doc_kwargs) + @doc( + NDFrame.transform, + klass=_shared_doc_kwargs["klass"], + axis=_shared_doc_kwargs["axis"], + ) def transform(self, func, axis=0, *args, **kwargs) -> "DataFrame": axis = self._get_axis_number(axis) if axis == 1: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index bad61a440b8c5..707b1b7fda4f4 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -853,7 +853,7 @@ def rename( copy : bool, default True Also copy underlying data. inplace : bool, default False - Whether to return a new %(klass)s. If True then value of copy is + Whether to return a new {klass}. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified @@ -867,7 +867,7 @@ def rename( Returns ------- - renamed : %(klass)s (new object) + renamed : {klass} (new object) Raises ------ @@ -1903,29 +1903,6 @@ def _repr_data_resource_(self): # ---------------------------------------------------------------------- # I/O Methods - _shared_docs[ - "to_markdown" - ] = """ - Print %(klass)s in Markdown-friendly format. - - .. versionadded:: 1.0.0 - - Parameters - ---------- - buf : str, Path or StringIO-like, optional, default None - Buffer to write to. If None, the output is returned as a string. - mode : str, optional - Mode in which file is opened. - **kwargs - These parameters will be passed to `tabulate \ - <https://pypi.org/project/tabulate>`_. - - Returns - ------- - str - %(klass)s in Markdown-friendly format. 
- """ - @doc(klass="object") def to_excel( self, @@ -4242,9 +4219,15 @@ def sort_values( """ raise AbstractMethodError(self) + @doc( + klass=_shared_doc_kwargs["klass"], + axes=_shared_doc_kwargs["axes"], + optional_labels="", + optional_axis="", + ) def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries: """ - Conform %(klass)s to new index with optional filling logic. + Conform {klass} to new index with optional filling logic. Places NA/NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and @@ -4252,12 +4235,12 @@ def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries: Parameters ---------- - %(optional_labels)s - %(axes)s : array-like, optional + {optional_labels} + {axes} : array-like, optional New labels / index to conform to, should be specified using keywords. Preferably an Index object to avoid duplicating data. - %(optional_axis)s - method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'} + {optional_axis} + method : {{None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. @@ -4291,7 +4274,7 @@ def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries: Returns ------- - %(klass)s with changed index. + {klass} with changed index. See Also -------- @@ -4304,7 +4287,7 @@ def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries: ``DataFrame.reindex`` supports two calling conventions * ``(index=index_labels, columns=column_labels, ...)`` - * ``(labels, axis={'index', 'columns'}, ...)`` + * ``(labels, axis={{'index', 'columns'}}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. @@ -4312,8 +4295,8 @@ def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries: Create a dataframe with some fictional data. 
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] - >>> df = pd.DataFrame({'http_status': [200, 200, 404, 404, 301], - ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}, + >>> df = pd.DataFrame({{'http_status': [200, 200, 404, 404, 301], + ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}}, ... index=index) >>> df http_status response_time @@ -4384,7 +4367,7 @@ def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries: of dates). >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') - >>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]}, + >>> df2 = pd.DataFrame({{"prices": [100, 101, np.nan, 100, 89, 88]}}, ... index=date_index) >>> df2 prices @@ -5018,19 +5001,19 @@ def sample( locs = rs.choice(axis_length, size=n, replace=replace, p=weights) return self.take(locs, axis=axis) - _shared_docs[ - "pipe" - ] = r""" + @doc(klass=_shared_doc_kwargs["klass"]) + def pipe(self, func, *args, **kwargs): + r""" Apply func(self, \*args, \*\*kwargs). Parameters ---------- func : function - Function to apply to the %(klass)s. + Function to apply to the {klass}. ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of - ``callable`` that expects the %(klass)s. + ``callable`` that expects the {klass}. args : iterable, optional Positional arguments passed into ``func``. kwargs : mapping, optional @@ -5070,121 +5053,49 @@ def sample( ... .pipe((func, 'arg2'), arg1=a, arg3=c) ... ) # doctest: +SKIP """ - - @Appender(_shared_docs["pipe"] % _shared_doc_kwargs) - def pipe(self, func, *args, **kwargs): return com.pipe(self, func, *args, **kwargs) _shared_docs["aggregate"] = dedent( """ - Aggregate using one or more operations over the specified axis. - %(versionadded)s - Parameters - ---------- - func : function, str, list or dict - Function to use for aggregating the data. 
If a function, must either - work when passed a %(klass)s or when passed to %(klass)s.apply. - - Accepted combinations are: - - - function - - string function name - - list of functions and/or function names, e.g. ``[np.sum, 'mean']`` - - dict of axis labels -> functions, function names or list of such. - %(axis)s - *args - Positional arguments to pass to `func`. - **kwargs - Keyword arguments to pass to `func`. - - Returns - ------- - scalar, Series or DataFrame - - The return can be: - - * scalar : when Series.agg is called with single function - * Series : when DataFrame.agg is called with a single function - * DataFrame : when DataFrame.agg is called with several functions - - Return scalar, Series or DataFrame. - %(see_also)s - Notes - ----- - `agg` is an alias for `aggregate`. Use the alias. - - A passed user-defined-function will be passed a Series for evaluation. - %(examples)s""" - ) + Aggregate using one or more operations over the specified axis. + {versionadded} + Parameters + ---------- + func : function, str, list or dict + Function to use for aggregating the data. If a function, must either + work when passed a {klass} or when passed to {klass}.apply. + + Accepted combinations are: + + - function + - string function name + - list of functions and/or function names, e.g. ``[np.sum, 'mean']`` + - dict of axis labels -> functions, function names or list of such. + {axis} + *args + Positional arguments to pass to `func`. + **kwargs + Keyword arguments to pass to `func`. - _shared_docs[ - "transform" - ] = """ - Call ``func`` on self producing a %(klass)s with transformed values. + Returns + ------- + scalar, Series or DataFrame - Produced %(klass)s will have same axis length as self. + The return can be: - Parameters - ---------- - func : function, str, list or dict - Function to use for transforming the data. If a function, must either - work when passed a %(klass)s or when passed to %(klass)s.apply. 
- - Accepted combinations are: - - - function - - string function name - - list of functions and/or function names, e.g. ``[np.exp. 'sqrt']`` - - dict of axis labels -> functions, function names or list of such. - %(axis)s - *args - Positional arguments to pass to `func`. - **kwargs - Keyword arguments to pass to `func`. - - Returns - ------- - %(klass)s - A %(klass)s that must have the same length as self. - - Raises - ------ - ValueError : If the returned %(klass)s has a different length than self. - - See Also - -------- - %(klass)s.agg : Only perform aggregating type operations. - %(klass)s.apply : Invoke function on a %(klass)s. - - Examples - -------- - >>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)}) - >>> df - A B - 0 0 1 - 1 1 2 - 2 2 3 - >>> df.transform(lambda x: x + 1) - A B - 0 1 2 - 1 2 3 - 2 3 4 - - Even though the resulting %(klass)s must have the same length as the - input %(klass)s, it is possible to provide several input functions: - - >>> s = pd.Series(range(3)) - >>> s - 0 0 - 1 1 - 2 2 - dtype: int64 - >>> s.transform([np.sqrt, np.exp]) - sqrt exp - 0 0.000000 1.000000 - 1 1.000000 2.718282 - 2 1.414214 7.389056 - """ + * scalar : when Series.agg is called with single function + * Series : when DataFrame.agg is called with a single function + * DataFrame : when DataFrame.agg is called with several functions + + Return scalar, Series or DataFrame. + {see_also} + Notes + ----- + `agg` is an alias for `aggregate`. Use the alias. + + A passed user-defined-function will be passed a Series for evaluation. + {examples}""" + ) # ---------------------------------------------------------------------- # Attribute access @@ -6199,7 +6110,7 @@ def ffill( Returns ------- - %(klass)s or None + {klass} or None Object with missing values filled or None if ``inplace=True``. """ return self.fillna( @@ -6220,7 +6131,7 @@ def bfill( Returns ------- - %(klass)s or None + {klass} or None Object with missing values filled or None if ``inplace=True``. 
""" return self.fillna( @@ -6691,9 +6602,18 @@ def replace( else: return result.__finalize__(self, method="replace") - _shared_docs[ - "interpolate" - ] = """ + def interpolate( + self: FrameOrSeries, + method: str = "linear", + axis: Axis = 0, + limit: Optional[int] = None, + inplace: bool_t = False, + limit_direction: Optional[str] = None, + limit_area: Optional[str] = None, + downcast: Optional[str] = None, + **kwargs, + ) -> Optional[FrameOrSeries]: + """ Please note that only ``method='linear'`` is supported for DataFrame/Series with a MultiIndex. @@ -6721,14 +6641,14 @@ def replace( `scipy.interpolate.BPoly.from_derivatives` which replaces 'piecewise_polynomial' interpolation method in scipy 0.18. - axis : {0 or 'index', 1 or 'columns', None}, default None + axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to interpolate along. limit : int, optional Maximum number of consecutive NaNs to fill. Must be greater than 0. inplace : bool, default False Update the data in place if possible. - limit_direction : {'forward', 'backward', 'both'}, Optional + limit_direction : {{'forward', 'backward', 'both'}}, Optional Consecutive NaNs will be filled in this direction. If limit is specified: @@ -6746,7 +6666,7 @@ def replace( raises ValueError if `limit_direction` is 'backward' or 'both' and method is 'pad' or 'ffill'. - limit_area : {`None`, 'inside', 'outside'}, default None + limit_area : {{`None`, 'inside', 'outside'}}, default None If limit is specified, consecutive NaNs will be filled with this restriction. 
@@ -6888,22 +6808,6 @@ def replace( 3 16.0 Name: d, dtype: float64 """ - - @Appender(_shared_docs["interpolate"] % _shared_doc_kwargs) - def interpolate( - self: FrameOrSeries, - method: str = "linear", - axis: Axis = 0, - limit: Optional[int] = None, - inplace: bool_t = False, - limit_direction: Optional[str] = None, - limit_area: Optional[str] = None, - downcast: Optional[str] = None, - **kwargs, - ) -> Optional[FrameOrSeries]: - """ - Interpolate values according to different methods. - """ inplace = validate_bool_kwarg(inplace, "inplace") axis = self._get_axis_number(axis) @@ -7159,9 +7063,9 @@ def asof(self, where, subset=None): # ---------------------------------------------------------------------- # Action Methods - _shared_docs[ - "isna" - ] = """ + @doc(klass=_shared_doc_kwargs["klass"]) + def isna(self: FrameOrSeries) -> FrameOrSeries: + """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. @@ -7173,26 +7077,26 @@ def asof(self, where, subset=None): Returns ------- - %(klass)s - Mask of bool values for each element in %(klass)s that + {klass} + Mask of bool values for each element in {klass} that indicates whether an element is not an NA value. See Also -------- - %(klass)s.isnull : Alias of isna. - %(klass)s.notna : Boolean inverse of isna. - %(klass)s.dropna : Omit axes labels with missing values. + {klass}.isnull : Alias of isna. + {klass}.notna : Boolean inverse of isna. + {klass}.dropna : Omit axes labels with missing values. isna : Top-level isna. Examples -------- Show which entries in a DataFrame are NA. - >>> df = pd.DataFrame({'age': [5, 6, np.NaN], + >>> df = pd.DataFrame({{'age': [5, 6, np.NaN], ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... 'name': ['Alfred', 'Batman', ''], - ... 'toy': [None, 'Batmobile', 'Joker']}) + ... 
'toy': [None, 'Batmobile', 'Joker']}}) >>> df age born name toy 0 5.0 NaT Alfred None @@ -7220,18 +7124,15 @@ def asof(self, where, subset=None): 2 True dtype: bool """ - - @Appender(_shared_docs["isna"] % _shared_doc_kwargs) - def isna(self: FrameOrSeries) -> FrameOrSeries: return isna(self).__finalize__(self, method="isna") - @Appender(_shared_docs["isna"] % _shared_doc_kwargs) + @doc(isna, klass=_shared_doc_kwargs["klass"]) def isnull(self: FrameOrSeries) -> FrameOrSeries: return isna(self).__finalize__(self, method="isnull") - _shared_docs[ - "notna" - ] = """ + @doc(klass=_shared_doc_kwargs["klass"]) + def notna(self: FrameOrSeries) -> FrameOrSeries: + """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. @@ -7243,26 +7144,26 @@ def isnull(self: FrameOrSeries) -> FrameOrSeries: Returns ------- - %(klass)s - Mask of bool values for each element in %(klass)s that + {klass} + Mask of bool values for each element in {klass} that indicates whether an element is not an NA value. See Also -------- - %(klass)s.notnull : Alias of notna. - %(klass)s.isna : Boolean inverse of notna. - %(klass)s.dropna : Omit axes labels with missing values. + {klass}.notnull : Alias of notna. + {klass}.isna : Boolean inverse of notna. + {klass}.dropna : Omit axes labels with missing values. notna : Top-level notna. Examples -------- Show which entries in a DataFrame are not NA. - >>> df = pd.DataFrame({'age': [5, 6, np.NaN], + >>> df = pd.DataFrame({{'age': [5, 6, np.NaN], ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... 'name': ['Alfred', 'Batman', ''], - ... 'toy': [None, 'Batmobile', 'Joker']}) + ... 
'toy': [None, 'Batmobile', 'Joker']}}) >>> df age born name toy 0 5.0 NaT Alfred None @@ -7290,12 +7191,9 @@ def isnull(self: FrameOrSeries) -> FrameOrSeries: 2 False dtype: bool """ - - @Appender(_shared_docs["notna"] % _shared_doc_kwargs) - def notna(self: FrameOrSeries) -> FrameOrSeries: return notna(self).__finalize__(self, method="notna") - @Appender(_shared_docs["notna"] % _shared_doc_kwargs) + @doc(notna, klass=_shared_doc_kwargs["klass"]) def notnull(self: FrameOrSeries) -> FrameOrSeries: return notna(self).__finalize__(self, method="notnull") @@ -8977,32 +8875,47 @@ def _where( result = self._constructor(new_data) return result.__finalize__(self) - _shared_docs[ - "where" - ] = """ - Replace values where the condition is %(cond_rev)s. + @doc( + klass=_shared_doc_kwargs["klass"], + cond="True", + cond_rev="False", + name="where", + name_other="mask", + ) + def where( + self, + cond, + other=np.nan, + inplace=False, + axis=None, + level=None, + errors="raise", + try_cast=False, + ): + """ + Replace values where the condition is {cond_rev}. Parameters ---------- - cond : bool %(klass)s, array-like, or callable - Where `cond` is %(cond)s, keep the original value. Where - %(cond_rev)s, replace with corresponding value from `other`. - If `cond` is callable, it is computed on the %(klass)s and - should return boolean %(klass)s or array. The callable must - not change input %(klass)s (though pandas doesn't check it). - other : scalar, %(klass)s, or callable - Entries where `cond` is %(cond_rev)s are replaced with + cond : bool {klass}, array-like, or callable + Where `cond` is {cond}, keep the original value. Where + {cond_rev}, replace with corresponding value from `other`. + If `cond` is callable, it is computed on the {klass} and + should return boolean {klass} or array. The callable must + not change input {klass} (though pandas doesn't check it). 
+ other : scalar, {klass}, or callable + Entries where `cond` is {cond_rev} are replaced with corresponding value from `other`. - If other is callable, it is computed on the %(klass)s and - should return scalar or %(klass)s. The callable must not - change input %(klass)s (though pandas doesn't check it). + If other is callable, it is computed on the {klass} and + should return scalar or {klass}. The callable must not + change input {klass} (though pandas doesn't check it). inplace : bool, default False Whether to perform the operation in place on the data. axis : int, default None Alignment axis if needed. level : int, default None Alignment level if needed. - errors : str, {'raise', 'ignore'}, default 'raise' + errors : str, {{'raise', 'ignore'}}, default 'raise' Note that currently this parameter won't affect the results and will always coerce to a suitable dtype. @@ -9018,13 +8931,13 @@ def _where( See Also -------- - :func:`DataFrame.%(name_other)s` : Return an object of same shape as + :func:`DataFrame.{name_other}` : Return an object of same shape as self. Notes ----- - The %(name)s method is an application of the if-then idiom. For each - element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the + The {name} method is an application of the if-then idiom. For each + element in the calling DataFrame, if ``cond`` is ``{cond}`` the element is used; otherwise the corresponding element from the DataFrame ``other`` is used. @@ -9032,7 +8945,7 @@ def _where( :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to ``np.where(m, df1, df2)``. - For further details and examples see the ``%(name)s`` documentation in + For further details and examples see the ``{name}`` documentation in :ref:`indexing <indexing.where_mask>`. 
Examples @@ -9070,7 +8983,7 @@ def _where( 2 4 5 3 6 7 4 8 9 - >>> m = df %% 3 == 0 + >>> m = df % 3 == 0 >>> df.where(m, -df) A B 0 0 -1 @@ -9093,42 +9006,18 @@ def _where( 3 True True 4 True True """ - - @Appender( - _shared_docs["where"] - % dict( - _shared_doc_kwargs, - cond="True", - cond_rev="False", - name="where", - name_other="mask", - ) - ) - def where( - self, - cond, - other=np.nan, - inplace=False, - axis=None, - level=None, - errors="raise", - try_cast=False, - ): - other = com.apply_if_callable(other, self) return self._where( cond, other, inplace, axis, level, errors=errors, try_cast=try_cast ) - @Appender( - _shared_docs["where"] - % dict( - _shared_doc_kwargs, - cond="False", - cond_rev="True", - name="mask", - name_other="where", - ) + @doc( + where, + klass=_shared_doc_kwargs["klass"], + cond="False", + cond_rev="True", + name="mask", + name_other="where", ) def mask( self, @@ -9518,7 +9407,7 @@ def tz_convert( Returns ------- - %(klass)s + {klass} Object with time zone converted axis. Raises @@ -10141,9 +10030,15 @@ def describe_1d(data): d.columns = data.columns.copy() return d - _shared_docs[ - "pct_change" - ] = """ + def pct_change( + self: FrameOrSeries, + periods=1, + fill_method="pad", + limit=None, + freq=None, + **kwargs, + ) -> FrameOrSeries: + """ Percentage change between the current and a prior element. Computes the percentage change from the immediately previous row by @@ -10257,17 +10152,6 @@ def describe_1d(data): GOOG NaN -0.151997 -0.086016 APPL NaN 0.337604 0.012002 """ - - @Appender(_shared_docs["pct_change"] % _shared_doc_kwargs) - def pct_change( - self: FrameOrSeries, - periods=1, - fill_method="pad", - limit=None, - freq=None, - **kwargs, - ) -> FrameOrSeries: - # TODO: Not sure if above is correct - need someone to confirm. 
axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name)) if fill_method is None: data = self @@ -10327,18 +10211,35 @@ def _add_numeric_operations(cls): empty_value=True, ) - @Substitution( + @doc( desc="Return the mean absolute deviation of the values " "for the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, - min_count="", see_also="", examples="", ) - @Appender(_num_doc_mad) def mad(self, axis=None, skipna=None, level=None): + """ + {desc} + + Parameters + ---------- + axis : {axis_descr} + Axis for the function to be applied on. + skipna : bool, default None + Exclude NA/null values when computing the result. + level : int or level name, default None + If the axis is a MultiIndex (hierarchical), count along a + particular level, collapsing into a {name1}. + + Returns + ------- + {name1} or {name2} (if level specified)\ + {see_also}\ + {examples} + """ if skipna is None: skipna = True if axis is None: @@ -10603,8 +10504,74 @@ def ewm( cls.ewm = ewm - @Appender(_shared_docs["transform"] % dict(axis="", **_shared_doc_kwargs)) + @doc(klass=_shared_doc_kwargs["klass"], axis="") def transform(self, func, *args, **kwargs): + """ + Call ``func`` on self producing a {klass} with transformed values. + + Produced {klass} will have same axis length as self. + + Parameters + ---------- + func : function, str, list or dict + Function to use for transforming the data. If a function, must either + work when passed a {klass} or when passed to {klass}.apply. + + Accepted combinations are: + + - function + - string function name + - list of functions and/or function names, e.g. ``[np.exp. 'sqrt']`` + - dict of axis labels -> functions, function names or list of such. + {axis} + *args + Positional arguments to pass to `func`. + **kwargs + Keyword arguments to pass to `func`. + + Returns + ------- + {klass} + A {klass} that must have the same length as self. 
+ + Raises + ------ + ValueError : If the returned {klass} has a different length than self. + + See Also + -------- + {klass}.agg : Only perform aggregating type operations. + {klass}.apply : Invoke function on a {klass}. + + Examples + -------- + >>> df = pd.DataFrame({{'A': range(3), 'B': range(1, 4)}}) + >>> df + A B + 0 0 1 + 1 1 2 + 2 2 3 + >>> df.transform(lambda x: x + 1) + A B + 0 1 2 + 1 2 3 + 2 3 4 + + Even though the resulting {klass} must have the same length as the + input {klass}, it is possible to provide several input functions: + + >>> s = pd.Series(range(3)) + >>> s + 0 0 + 1 1 + 2 2 + dtype: int64 + >>> s.transform([np.sqrt, np.exp]) + sqrt exp + 0 0.000000 1.000000 + 1 1.000000 2.718282 + 2 1.414214 7.389056 + """ result = self.agg(func, *args, **kwargs) if is_scalar(result) or len(result) != len(self): raise ValueError("transforms cannot produce aggregated results") @@ -10614,21 +10581,6 @@ def transform(self, func, *args, **kwargs): # ---------------------------------------------------------------------- # Misc methods - _shared_docs[ - "valid_index" - ] = """ - Return index for %(position)s non-NA/null value. - - Returns - ------- - scalar : type of index - - Notes - ----- - If all elements are non-NA/null, returns None. - Also returns None for empty %(klass)s. - """ - def _find_valid_index(self, how: str): """ Retrieves the index of the first valid value. @@ -10647,15 +10599,23 @@ def _find_valid_index(self, how: str): return None return self.index[idxpos] - @Appender( - _shared_docs["valid_index"] % {"position": "first", "klass": "Series/DataFrame"} - ) + @doc(position="first", klass=_shared_doc_kwargs["klass"]) def first_valid_index(self): + """ + Return index for {position} non-NA/null value. + + Returns + ------- + scalar : type of index + + Notes + ----- + If all elements are non-NA/null, returns None. + Also returns None for empty {klass}. 
+ """ return self._find_valid_index("first") - @Appender( - _shared_docs["valid_index"] % {"position": "last", "klass": "Series/DataFrame"} - ) + @doc(first_valid_index, position="last", klass=_shared_doc_kwargs["klass"]) def last_valid_index(self): return self._find_valid_index("last") @@ -10696,26 +10656,6 @@ def _doc_parms(cls): %(examples)s """ -_num_doc_mad = """ -%(desc)s - -Parameters ----------- -axis : %(axis_descr)s - Axis for the function to be applied on. -skipna : bool, default None - Exclude NA/null values when computing the result. -level : int or level name, default None - If the axis is a MultiIndex (hierarchical), count along a - particular level, collapsing into a %(name1)s. - -Returns -------- -%(name1)s or %(name2)s (if level specified)\ -%(see_also)s\ -%(examples)s -""" - _num_ddof_doc = """ %(desc)s diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index db5df9818b0b0..128f7cd6cd90c 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -224,10 +224,9 @@ def _selection_name(self): def apply(self, func, *args, **kwargs): return super().apply(func, *args, **kwargs) - @Substitution( - examples=_agg_examples_doc, klass="Series", + @doc( + _agg_template, examples=_agg_examples_doc, klass="Series", ) - @Appender(_agg_template) def aggregate( self, func=None, *args, engine="cython", engine_kwargs=None, **kwargs ): @@ -915,10 +914,9 @@ class DataFrameGroupBy(GroupBy[DataFrame]): See :ref:`groupby.aggregate.named` for more.""" ) - @Substitution( - examples=_agg_examples_doc, klass="DataFrame", + @doc( + _agg_template, examples=_agg_examples_doc, klass="DataFrame", ) - @Appender(_agg_template) def aggregate( self, func=None, *args, engine="cython", engine_kwargs=None, **kwargs ): diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index c2be8d96402df..e4baee1e9cb97 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -372,7 +372,7 @@ class 
providing the base-class of operations. ---------- func : function, str, list or dict Function to use for aggregating the data. If a function, must either - work when passed a %(klass)s or when passed to %(klass)s.apply. + work when passed a {klass} or when passed to {klass}.apply. Accepted combinations are: @@ -403,7 +403,7 @@ class providing the base-class of operations. * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` and ``parallel`` dictionary keys. The values must either be ``True`` or ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is - ``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be applied to the function .. versionadded:: 1.1.0 @@ -412,20 +412,20 @@ class providing the base-class of operations. Returns ------- -%(klass)s +{klass} See Also -------- -%(klass)s.groupby.apply -%(klass)s.groupby.transform -%(klass)s.aggregate +{klass}.groupby.apply +{klass}.groupby.transform +{klass}.aggregate Notes ----- When using ``engine='numba'``, there will be no "fall back" behavior internally. The group data and group index will be passed as numpy arrays to the JITed user defined function, and no alternative execution attempts will be tried. 
-%(examples)s +{examples} """ diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 5e363f2814d39..bfdfc65723433 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -274,14 +274,14 @@ def pipe(self, func, *args, **kwargs): """ ) - @Substitution( + @doc( + _shared_docs["aggregate"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, versionadded="", klass="DataFrame", axis="", ) - @Appender(_shared_docs["aggregate"]) def aggregate(self, func, *args, **kwargs): self._set_binner() diff --git a/pandas/core/series.py b/pandas/core/series.py index b32a4c36a8247..a6e5cf9eb7a8a 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1410,8 +1410,46 @@ def to_string( with open(buf, "w") as f: f.write(result) - @Appender( + @doc( + klass=_shared_doc_kwargs["klass"], + examples=dedent( + """ + Examples + -------- + >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") + >>> print(s.to_markdown()) + | | animal | + |---:|:---------| + | 0 | elk | + | 1 | pig | + | 2 | dog | + | 3 | quetzal | + """ + ), + ) + def to_markdown( + self, buf: Optional[IO[str]] = None, mode: Optional[str] = None, **kwargs + ) -> Optional[str]: """ + Print {klass} in Markdown-friendly format. + + .. versionadded:: 1.0.0 + + Parameters + ---------- + buf : str, Path or StringIO-like, optional, default None + Buffer to write to. If None, the output is returned as a string. + mode : str, optional + Mode in which file is opened. + **kwargs + These parameters will be passed to `tabulate \ + <https://pypi.org/project/tabulate>`_. + + Returns + ------- + str + {klass} in Markdown-friendly format. 
+ Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") @@ -1438,12 +1476,6 @@ def to_string( | 3 | quetzal | +----+----------+ """ - ) - @Substitution(klass="Series") - @Appender(generic._shared_docs["to_markdown"]) - def to_markdown( - self, buf: Optional[IO[str]] = None, mode: Optional[str] = None, **kwargs - ) -> Optional[str]: return self.to_frame().to_markdown(buf, mode, **kwargs) # ---------------------------------------------------------------------- @@ -3964,13 +3996,14 @@ def _gotitem(self, key, ndim, subset=None) -> "Series": """ ) - @Substitution( + @doc( + generic._shared_docs["aggregate"], + klass=_shared_doc_kwargs["klass"], + axis=_shared_doc_kwargs["axis"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, versionadded="\n.. versionadded:: 0.20.0\n", - **_shared_doc_kwargs, ) - @Appender(generic._shared_docs["aggregate"]) def aggregate(self, func, axis=0, *args, **kwargs): # Validate the axis parameter self._get_axis_number(axis) @@ -3999,7 +4032,11 @@ def aggregate(self, func, axis=0, *args, **kwargs): agg = aggregate - @Appender(generic._shared_docs["transform"] % _shared_doc_kwargs) + @doc( + NDFrame.transform, + klass=_shared_doc_kwargs["klass"], + axis=_shared_doc_kwargs["axis"], + ) def transform(self, func, axis=0, *args, **kwargs): # Validate the axis parameter self._get_axis_number(axis) @@ -4190,7 +4227,11 @@ def _needs_reindex_multi(self, axes, method, level): """ return False - @doc(NDFrame.align, **_shared_doc_kwargs) + @doc( + NDFrame.align, + klass=_shared_doc_kwargs["klass"], + axes_single_arg=_shared_doc_kwargs["axes_single_arg"], + ) def align( self, other, @@ -4321,8 +4362,13 @@ def rename( def set_axis(self, labels, axis: Axis = 0, inplace: bool = False): return super().set_axis(labels, axis=axis, inplace=inplace) - @Substitution(**_shared_doc_kwargs) - @Appender(generic.NDFrame.reindex.__doc__) + @doc( + NDFrame.reindex, + klass=_shared_doc_kwargs["klass"], + axes=_shared_doc_kwargs["axes"], 
+ optional_labels=_shared_doc_kwargs["optional_labels"], + optional_axis=_shared_doc_kwargs["optional_axis"], + ) def reindex(self, index=None, **kwargs): return super().reindex(index=index, **kwargs) @@ -4451,7 +4497,7 @@ def fillna( downcast=downcast, ) - @doc(NDFrame.replace, **_shared_doc_kwargs) + @doc(NDFrame.replace, klass=_shared_doc_kwargs["klass"]) def replace( self, to_replace=None, @@ -4470,7 +4516,7 @@ def replace( method=method, ) - @doc(NDFrame.shift, **_shared_doc_kwargs) + @doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"]) def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> "Series": return super().shift( periods=periods, freq=freq, axis=axis, fill_value=fill_value @@ -4691,19 +4737,19 @@ def _convert_dtypes( result = input_series.copy() return result - @Appender(generic._shared_docs["isna"] % _shared_doc_kwargs) + @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) def isna(self) -> "Series": return super().isna() - @Appender(generic._shared_docs["isna"] % _shared_doc_kwargs) + @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) def isnull(self) -> "Series": return super().isnull() - @Appender(generic._shared_docs["notna"] % _shared_doc_kwargs) + @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) def notna(self) -> "Series": return super().notna() - @Appender(generic._shared_docs["notna"] % _shared_doc_kwargs) + @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) def notnull(self) -> "Series": return super().notnull() diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index 0e39b94574a12..b708020be90d2 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -7,7 +7,7 @@ import pandas._libs.window.aggregations as window_aggregations from pandas._typing import FrameOrSeries from pandas.compat.numpy import function as nv -from pandas.util._decorators import Appender, Substitution +from pandas.util._decorators import Appender, Substitution, doc from pandas.core.dtypes.generic import 
ABCDataFrame @@ -214,14 +214,14 @@ def _constructor(self): """ ) - @Substitution( + @doc( + _shared_docs["aggregate"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, versionadded="", klass="Series/Dataframe", axis="", ) - @Appender(_shared_docs["aggregate"]) def aggregate(self, func, *args, **kwargs): return super().aggregate(func, *args, **kwargs) diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py index 438032a0c4419..bbc19fad8b799 100644 --- a/pandas/core/window/expanding.py +++ b/pandas/core/window/expanding.py @@ -2,7 +2,7 @@ from typing import Dict, Optional from pandas.compat.numpy import function as nv -from pandas.util._decorators import Appender, Substitution +from pandas.util._decorators import Appender, Substitution, doc from pandas.core.window.common import WindowGroupByMixin, _doc_template, _shared_docs from pandas.core.window.rolling import _Rolling_and_Expanding @@ -113,14 +113,14 @@ def _get_window(self, other=None, **kwargs): """ ) - @Substitution( + @doc( + _shared_docs["aggregate"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, versionadded="", klass="Series/Dataframe", axis="", ) - @Appender(_shared_docs["aggregate"]) def aggregate(self, func, *args, **kwargs): return super().aggregate(func, *args, **kwargs) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 92be2d056cfcb..89f8450ef7bde 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -15,7 +15,7 @@ from pandas._typing import Axis, FrameOrSeries, Scalar from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv -from pandas.util._decorators import Appender, Substitution, cache_readonly +from pandas.util._decorators import Appender, Substitution, cache_readonly, doc from pandas.core.dtypes.common import ( ensure_float64, @@ -1151,14 +1151,14 @@ def _get_window( """ ) - @Substitution( + @doc( + _shared_docs["aggregate"], 
see_also=_agg_see_also_doc, examples=_agg_examples_doc, versionadded="", klass="Series/DataFrame", axis="", ) - @Appender(_shared_docs["aggregate"]) def aggregate(self, func, *args, **kwargs): result, how = self._aggregate(func, *args, **kwargs) if result is None: @@ -2020,14 +2020,14 @@ def _validate_freq(self): """ ) - @Substitution( + @doc( + _shared_docs["aggregate"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, versionadded="", klass="Series/Dataframe", axis="", ) - @Appender(_shared_docs["aggregate"]) def aggregate(self, func, *args, **kwargs): return super().aggregate(func, *args, **kwargs)
- [x] ref #31942 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] replaces `Appender` decorator with `doc`
https://api.github.com/repos/pandas-dev/pandas/pulls/33277
2020-04-03T22:12:25Z
2020-06-19T12:01:14Z
2020-06-19T12:01:14Z
2020-06-19T16:36:01Z
Added finalize benchmark
diff --git a/asv_bench/benchmarks/finalize.py b/asv_bench/benchmarks/finalize.py new file mode 100644 index 0000000000000..dc06f55cc6ca0 --- /dev/null +++ b/asv_bench/benchmarks/finalize.py @@ -0,0 +1,16 @@ +import pandas as pd + + +class Finalize: + param_names = ["series", "frame"] + params = [pd.Series, pd.DataFrame] + + def setup(self, param): + N = 1000 + obj = param(dtype=float) + for i in range(N): + obj.attrs[i] = i + self.obj = obj + + def time_finalize_micro(self, param): + self.obj.__finalize__(self.obj, method="__finalize__")
This adds a benchmark for finalize. It scales with the number of `attrs`.
https://api.github.com/repos/pandas-dev/pandas/pulls/33275
2020-04-03T21:03:52Z
2020-04-06T18:35:58Z
2020-04-06T18:35:58Z
2020-04-06T18:36:01Z
Unpin mypy
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 62a3808d36ba2..0f2a90709fc24 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -2,6 +2,7 @@ Generic data algorithms. This module is experimental at the moment and not intended for public consumption """ +import abc import operator from textwrap import dedent from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union @@ -1065,7 +1066,7 @@ def _get_score(at): # --------------- # -class SelectN: +class SelectN(abc.ABC): def __init__(self, obj, n: int, keep: str): self.obj = obj self.n = n @@ -1090,6 +1091,9 @@ def is_valid_dtype_n_method(dtype) -> bool: is_numeric_dtype(dtype) and not is_complex_dtype(dtype) ) or needs_i8_conversion(dtype) + @abc.abstractmethod + def compute(self, method): ... + class SelectNSeries(SelectN): """ diff --git a/pandas/core/apply.py b/pandas/core/apply.py index a0351cb687d02..3818ca6ed5876 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -145,6 +145,9 @@ def get_result(self): """ compute the results """ # dispatch to agg if is_list_like(self.f) or is_dict_like(self.f): + if "axis" in self.kwds: + self.kwds.pop("axis") + return self.obj.aggregate(self.f, axis=self.axis, *self.args, **self.kwds) # all empty diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 6cb597ba75852..5cf04276b36fb 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -13,6 +13,7 @@ from pandas._libs import lib from pandas._typing import ArrayLike +from typing_extensions import Protocol from pandas.compat import set_function_name from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError @@ -1053,6 +1054,19 @@ def __hash__(self): raise TypeError(f"unhashable type: {repr(type(self).__name__)}") +class OpsExtendable(Protocol): + + @classmethod + def _create_arithmetic_method(cls, op): ... + + @classmethod + def _create_comparison_method(cls, op): ... 
+ + @classmethod + def _create_logical_method(cls, op): ... + + + class ExtensionOpsMixin: """ A base class for linking the operators to their dunder names. @@ -1065,41 +1079,41 @@ class ExtensionOpsMixin: """ @classmethod - def _add_arithmetic_ops(cls): - cls.__add__ = cls._create_arithmetic_method(operator.add) - cls.__radd__ = cls._create_arithmetic_method(ops.radd) - cls.__sub__ = cls._create_arithmetic_method(operator.sub) - cls.__rsub__ = cls._create_arithmetic_method(ops.rsub) - cls.__mul__ = cls._create_arithmetic_method(operator.mul) - cls.__rmul__ = cls._create_arithmetic_method(ops.rmul) - cls.__pow__ = cls._create_arithmetic_method(operator.pow) - cls.__rpow__ = cls._create_arithmetic_method(ops.rpow) - cls.__mod__ = cls._create_arithmetic_method(operator.mod) - cls.__rmod__ = cls._create_arithmetic_method(ops.rmod) - cls.__floordiv__ = cls._create_arithmetic_method(operator.floordiv) - cls.__rfloordiv__ = cls._create_arithmetic_method(ops.rfloordiv) - cls.__truediv__ = cls._create_arithmetic_method(operator.truediv) - cls.__rtruediv__ = cls._create_arithmetic_method(ops.rtruediv) - cls.__divmod__ = cls._create_arithmetic_method(divmod) - cls.__rdivmod__ = cls._create_arithmetic_method(ops.rdivmod) + def _add_arithmetic_ops(cls: OpsExtendable): + setattr(cls, "__add__", cls._create_arithmetic_method(operator.add)) + setattr(cls, "__radd__", cls._create_arithmetic_method(ops.radd)) + setattr(cls, "__sub__", cls._create_arithmetic_method(operator.sub)) + setattr(cls, "__rsub__", cls._create_arithmetic_method(ops.rsub)) + setattr(cls, "__mul__", cls._create_arithmetic_method(operator.mul)) + setattr(cls, "__rmul__", cls._create_arithmetic_method(ops.rmul)) + setattr(cls, "__pow__", cls._create_arithmetic_method(operator.pow)) + setattr(cls, "__rpow__", cls._create_arithmetic_method(ops.rpow)) + setattr(cls, "__mod__", cls._create_arithmetic_method(operator.mod)) + setattr(cls, "__rmod__", cls._create_arithmetic_method(ops.rmod)) + setattr(cls, 
"__floordiv__", cls._create_arithmetic_method(operator.floordiv)) + setattr(cls, "__rfloordiv__", cls._create_arithmetic_method(ops.rfloordiv)) + setattr(cls, "__truediv__", cls._create_arithmetic_method(operator.truediv)) + setattr(cls, "__rtruediv__", cls._create_arithmetic_method(ops.rtruediv)) + setattr(cls, "__divmod__", cls._create_arithmetic_method(divmod)) + setattr(cls, "__rdivmod__", cls._create_arithmetic_method(ops.rdivmod)) @classmethod - def _add_comparison_ops(cls): - cls.__eq__ = cls._create_comparison_method(operator.eq) - cls.__ne__ = cls._create_comparison_method(operator.ne) - cls.__lt__ = cls._create_comparison_method(operator.lt) - cls.__gt__ = cls._create_comparison_method(operator.gt) - cls.__le__ = cls._create_comparison_method(operator.le) - cls.__ge__ = cls._create_comparison_method(operator.ge) + def _add_comparison_ops(cls: OpsExtendable): + setattr(cls, "__eq__", cls._create_comparison_method(operator.eq)) + setattr(cls, "__ne__", cls._create_comparison_method(operator.ne)) + setattr(cls, "__lt__", cls._create_comparison_method(operator.lt)) + setattr(cls, "__gt__", cls._create_comparison_method(operator.gt)) + setattr(cls, "__le__", cls._create_comparison_method(operator.le)) + setattr(cls, "__ge__", cls._create_comparison_method(operator.ge)) @classmethod - def _add_logical_ops(cls): - cls.__and__ = cls._create_logical_method(operator.and_) - cls.__rand__ = cls._create_logical_method(ops.rand_) - cls.__or__ = cls._create_logical_method(operator.or_) - cls.__ror__ = cls._create_logical_method(ops.ror_) - cls.__xor__ = cls._create_logical_method(operator.xor) - cls.__rxor__ = cls._create_logical_method(ops.rxor) + def _add_logical_ops(cls: OpsExtendable): + setattr(cls, "__and__", cls._create_logical_method(operator.and_)) + setattr(cls, "__rand__", cls._create_logical_method(ops.rand_)) + setattr(cls, "__or__", cls._create_logical_method(operator.or_)) + setattr(cls, "__ror__", cls._create_logical_method(ops.ror_)) + setattr(cls, 
"__xor__", cls._create_logical_method(operator.xor)) + setattr(cls, "__rxor__", cls._create_logical_method(ops.rxor)) class ExtensionScalarOpsMixin(ExtensionOpsMixin): diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 4fabd8f558fee..7b09879af4673 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1,9 +1,11 @@ +import abc from datetime import datetime, timedelta import operator -from typing import Any, Sequence, Type, Union, cast +from typing import Any, Sequence, Tuple, Type, Union, cast import warnings import numpy as np +from typing_extensions import Protocol from pandas._libs import NaT, NaTType, Timestamp, algos, iNaT, lib from pandas._libs.tslibs.c_timestamp import integer_op_not_supported @@ -212,7 +214,12 @@ def _check_compatible_with( raise AbstractMethodError(self) -class DatelikeOps: +class DatelikeOperable(Protocol): + + def _format_native_types(self, date_format, na_rep): ... + + +class DatelikeOps(abc.ABC): """ Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex. """ @@ -221,7 +228,7 @@ class DatelikeOps: URL="https://docs.python.org/3/library/datetime.html" "#strftime-and-strptime-behavior" ) - def strftime(self, date_format): + def strftime(self: DatelikeOperable, date_format): """ Convert to Index using specified date_format. @@ -260,6 +267,30 @@ def strftime(self, date_format): return result.astype(object) +class TimelikeOperable(Protocol): + + @property + def tz(self): ... + + @property + def dtype(self): ... + + @property + def inferred_freq(self): ... + + def __len__(self): ... + + def _round(self, freq, mode, ambiguous, nonexistent): ... + + def tz_localize(self, tz, ambiguous="raise", nonexistent="raise"): ... + + def view(self, dtype): ... + + def _maybe_mask_results(self, result, fill_value=iNaT, convert=None): ... + + def _simple_new(self, values: np.ndarray, dtype=None, freq=None): ... 
+ + class TimelikeOps: """ Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex. @@ -368,7 +399,7 @@ class TimelikeOps: dtype: datetime64[ns] """ - def _round(self, freq, mode, ambiguous, nonexistent): + def _round(self: TimelikeOperable, freq, mode, ambiguous, nonexistent): # round the local times if is_datetime64tz_dtype(self): # operate on naive timestamps, then convert back to aware @@ -385,18 +416,18 @@ def _round(self, freq, mode, ambiguous, nonexistent): return self._simple_new(result, dtype=self.dtype) @Appender((_round_doc + _round_example).format(op="round")) - def round(self, freq, ambiguous="raise", nonexistent="raise"): + def round(self: TimelikeOperable, freq, ambiguous="raise", nonexistent="raise"): return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent) @Appender((_round_doc + _floor_example).format(op="floor")) - def floor(self, freq, ambiguous="raise", nonexistent="raise"): + def floor(self: TimelikeOperable, freq, ambiguous="raise", nonexistent="raise"): return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent) @Appender((_round_doc + _ceil_example).format(op="ceil")) - def ceil(self, freq, ambiguous="raise", nonexistent="raise"): + def ceil(self: TimelikeOperable, freq, ambiguous="raise", nonexistent="raise"): return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent) - def _with_freq(self, freq): + def _with_freq(self: TimelikeOperable, freq): """ Helper to set our freq in-place, returning self to allow method chaining. @@ -425,6 +456,73 @@ def _with_freq(self, freq): return self +class DatetimeLikeArrayProtocol(Protocol): + _data: Any + _freq: Any + _recognized_scalars: Any + _resolution: Any + _scalar_type: Any + freq: Any + dtype: Any + asi8: Any + ndim: int + + def __init__(self, values, dtype, freq=None, copy=False): ... + + def __add__(self, other): ... + + @property + def _box_func(self): ... + + @property + def size(self): ... + + @property + def freqstr(self) -> str: ... 
+ + @property + def _isnan(self) -> bool: ... + + @property + def _hasnans(self) -> bool: ... + + def _generate_range(self, start, end, periods, freq, closed_or_fields): ... + + def shape(self) -> Tuple[int, ...]: ... + + def _simple_new(self, values: np.ndarray, dtype=None, freq=None): ... + + def _box_values(self, values): ... + + def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): ... + + def _check_compatible_with(self, other, setitem: bool = False): ... + + def _unbox_scalar(self, value): ... + + def _validate_fill_value(self, fill_value): ... + + def _add_nat(self): ... + + def _add_offset(self, other): ... + + def _add_datetimelike_scalar(self, other): ... + + def _time_shift(self, other): ... + + def _add_datetime_arraylike(self, other): ... + + def _add_timedelta_arraylike(self, other): ... + + def _addsub_int_array(self, other, op): ... + + def _addsub_object_array(self, other, op): ... + + def copy(self): ... + + def isna(self) -> bool: ... + + class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin, ExtensionArray): """ Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray @@ -438,19 +536,19 @@ class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin, ExtensionArray) """ @property - def ndim(self) -> int: + def ndim(self: DatetimeLikeArrayProtocol) -> int: return self._data.ndim @property - def shape(self): + def shape(self: DatetimeLikeArrayProtocol): return self._data.shape - def reshape(self, *args, **kwargs): + def reshape(self: DatetimeLikeArrayProtocol, *args, **kwargs): # Note: we drop any freq data = self._data.reshape(*args, **kwargs) return type(self)(data, dtype=self.dtype) - def ravel(self, *args, **kwargs): + def ravel(self: DatetimeLikeArrayProtocol, *args, **kwargs): # Note: we drop any freq data = self._data.ravel(*args, **kwargs) return type(self)(data, dtype=self.dtype) @@ -468,11 +566,11 @@ def _box_values(self, values): """ return lib.map_infer(values, self._box_func) - def 
__iter__(self): + def __iter__(self: DatetimeLikeArrayProtocol): return (self._box_func(v) for v in self.asi8) @property - def asi8(self) -> np.ndarray: + def asi8(self: DatetimeLikeArrayProtocol) -> np.ndarray: """ Integer representation of the values. @@ -487,7 +585,7 @@ def asi8(self) -> np.ndarray: # ---------------------------------------------------------------- # Rendering Methods - def _format_native_types(self, na_rep="NaT", date_format=None): + def _format_native_types(self: DatetimeLikeArrayProtocol, na_rep="NaT", date_format=None): """ Helper method for astype when converting to strings. @@ -505,7 +603,7 @@ def _formatter(self, boxed=False): # Array-Like / EA-Interface Methods @property - def nbytes(self): + def nbytes(self: DatetimeLikeArrayProtocol): return self._data.nbytes def __array__(self, dtype=None) -> np.ndarray: @@ -515,14 +613,14 @@ def __array__(self, dtype=None) -> np.ndarray: return self._data @property - def size(self) -> int: + def size(self: DatetimeLikeArrayProtocol) -> int: """The number of elements in this array.""" return np.prod(self.shape) def __len__(self) -> int: return len(self._data) - def __getitem__(self, key): + def __getitem__(self: DatetimeLikeArrayProtocol, key): """ This getitem defers to the underlying array, which by-definition can only handle list-likes, slices, and integer scalars @@ -631,7 +729,7 @@ def _maybe_clear_freq(self): # DatetimeArray and TimedeltaArray pass - def astype(self, dtype, copy=True): + def astype(self: DatetimeLikeArrayProtocol, dtype, copy=True): # Some notes on cases we don't have to handle here in the base class: # 1. PeriodArray.astype handles period -> period # 2. DatetimeArray.astype handles conversion between tz. 
@@ -669,7 +767,7 @@ def astype(self, dtype, copy=True): else: return np.asarray(self, dtype=dtype) - def view(self, dtype=None): + def view(self: DatetimeLikeArrayProtocol, dtype=None): if dtype is None or dtype is self.dtype: return type(self)(self._data, dtype=self.dtype) return self._data.view(dtype=dtype) @@ -677,11 +775,11 @@ def view(self, dtype=None): # ------------------------------------------------------------------ # ExtensionArray Interface - def unique(self): + def unique(self: DatetimeLikeArrayProtocol): result = unique1d(self.asi8) return type(self)(result, dtype=self.dtype) - def _validate_fill_value(self, fill_value): + def _validate_fill_value(self: DatetimeLikeArrayProtocol, fill_value): """ If a fill_value is passed to `take` convert it to an i8 representation, raising ValueError if this is not possible. @@ -710,7 +808,7 @@ def _validate_fill_value(self, fill_value): ) return fill_value - def take(self, indices, allow_fill=False, fill_value=None): + def take(self: DatetimeLikeArrayProtocol, indices, allow_fill=False, fill_value=None): if allow_fill: fill_value = self._validate_fill_value(fill_value) @@ -748,22 +846,22 @@ def _concat_same_type(cls, to_concat): return cls._simple_new(values, dtype=dtype, freq=new_freq) - def copy(self): + def copy(self: DatetimeLikeArrayProtocol): values = self.asi8.copy() return type(self)._simple_new(values, dtype=self.dtype, freq=self.freq) - def _values_for_factorize(self): + def _values_for_factorize(self: DatetimeLikeArrayProtocol): return self.asi8, iNaT @classmethod - def _from_factorized(cls, values, original): + def _from_factorized(cls: DatetimeLikeArrayProtocol, values, original): return cls(values, dtype=original.dtype) - def _values_for_argsort(self): + def _values_for_argsort(self: DatetimeLikeArrayProtocol): return self._data @Appender(ExtensionArray.shift.__doc__) - def shift(self, periods=1, fill_value=None, axis=0): + def shift(self: DatetimeLikeArrayProtocol, periods=1, fill_value=None, 
axis=0): if not self.size or periods == 0: return self.copy() @@ -799,7 +897,7 @@ def shift(self, periods=1, fill_value=None, axis=0): # These are not part of the EA API, but we implement them because # pandas assumes they're there. - def searchsorted(self, value, side="left", sorter=None): + def searchsorted(self: DatetimeLikeArrayProtocol, value, side="left", sorter=None): """ Find indices where elements should be inserted to maintain order. @@ -859,7 +957,7 @@ def searchsorted(self, value, side="left", sorter=None): # TODO: Use datetime64 semantics for sorting, xref GH#29844 return self.asi8.searchsorted(value, side=side, sorter=sorter) - def repeat(self, repeats, *args, **kwargs): + def repeat(self: DatetimeLikeArrayProtocol, repeats, *args, **kwargs): """ Repeat elements of an array. @@ -871,7 +969,7 @@ def repeat(self, repeats, *args, **kwargs): values = self._data.repeat(repeats) return type(self)(values.view("i8"), dtype=self.dtype) - def value_counts(self, dropna=False): + def value_counts(self: DatetimeLikeArrayProtocol, dropna=False): """ Return a Series containing counts of unique values. 
@@ -912,24 +1010,24 @@ def map(self, mapper): # ------------------------------------------------------------------ # Null Handling - def isna(self): + def isna(self: DatetimeLikeArrayProtocol): return self._isnan @property # NB: override with cache_readonly in immutable subclasses - def _isnan(self): + def _isnan(self: DatetimeLikeArrayProtocol): """ return if each value is nan """ return self.asi8 == iNaT @property # NB: override with cache_readonly in immutable subclasses - def _hasnans(self): + def _hasnans(self: DatetimeLikeArrayProtocol): """ return if I have any nans; enables various perf speedups """ return bool(self._isnan.any()) - def _maybe_mask_results(self, result, fill_value=iNaT, convert=None): + def _maybe_mask_results(self: DatetimeLikeArrayProtocol, result, fill_value=iNaT, convert=None): """ Parameters ---------- @@ -954,7 +1052,7 @@ def _maybe_mask_results(self, result, fill_value=iNaT, convert=None): result[self._isnan] = fill_value return result - def fillna(self, value=None, method=None, limit=None): + def fillna(self: DatetimeLikeArrayProtocol, value=None, method=None, limit=None): # TODO(GH-20300): remove this # Just overriding to ensure that we avoid an astype(object). # Either 20300 or a `_values_for_fillna` would avoid this duplication. @@ -1005,14 +1103,14 @@ def fillna(self, value=None, method=None, limit=None): # Frequency Properties/Methods @property - def freq(self): + def freq(self: DatetimeLikeArrayProtocol): """ Return the frequency object if it is set, otherwise None. """ return self._freq @freq.setter - def freq(self, value): + def freq(self: DatetimeLikeArrayProtocol, value): if value is not None: value = frequencies.to_offset(value) self._validate_frequency(self, value) @@ -1020,7 +1118,7 @@ def freq(self, value): self._freq = value @property - def freqstr(self): + def freqstr(self: DatetimeLikeArrayProtocol): """ Return the frequency object as a string if its set, otherwise None. 
""" @@ -1029,7 +1127,7 @@ def freqstr(self): return self.freq.freqstr @property # NB: override with cache_readonly in immutable subclasses - def inferred_freq(self): + def inferred_freq(self: DatetimeLikeArrayProtocol): """ Tryies to return a string representing a frequency guess, generated by infer_freq. Returns None if it can't autodetect the @@ -1043,11 +1141,11 @@ def inferred_freq(self): return None @property # NB: override with cache_readonly in immutable subclasses - def _resolution(self): + def _resolution(self: DatetimeLikeArrayProtocol): return frequencies.Resolution.get_reso_from_freq(self.freqstr) @property # NB: override with cache_readonly in immutable subclasses - def resolution(self): + def resolution(self: DatetimeLikeArrayProtocol): """ Returns day, hour, minute, second, millisecond or microsecond """ @@ -1099,15 +1197,15 @@ def _validate_frequency(cls, index, freq, **kwargs): # see GH#23789 @property - def _is_monotonic_increasing(self): + def _is_monotonic_increasing(self: DatetimeLikeArrayProtocol): return algos.is_monotonic(self.asi8, timelike=True)[0] @property - def _is_monotonic_decreasing(self): + def _is_monotonic_decreasing(self: DatetimeLikeArrayProtocol): return algos.is_monotonic(self.asi8, timelike=True)[1] @property - def _is_unique(self): + def _is_unique(self: DatetimeLikeArrayProtocol): return len(unique1d(self.asi8)) == len(self) # ------------------------------------------------------------------ @@ -1149,7 +1247,7 @@ def _sub_period(self, other): def _add_offset(self, offset): raise AbstractMethodError(self) - def _add_timedeltalike_scalar(self, other): + def _add_timedeltalike_scalar(self: DatetimeLikeArrayProtocol, other): """ Add a delta of a timedeltalike @@ -1179,7 +1277,7 @@ def _add_timedeltalike_scalar(self, other): return type(self)(new_values, dtype=self.dtype, freq=new_freq) return type(self)(new_values, dtype=self.dtype)._with_freq("infer") - def _add_timedelta_arraylike(self, other): + def 
_add_timedelta_arraylike(self: DatetimeLikeArrayProtocol, other): """ Add a delta of a TimedeltaIndex @@ -1209,7 +1307,7 @@ def _add_timedelta_arraylike(self, other): return type(self)(new_values, dtype=self.dtype)._with_freq("infer") - def _add_nat(self): + def _add_nat(self: DatetimeLikeArrayProtocol): """ Add pd.NaT to self """ @@ -1224,7 +1322,7 @@ def _add_nat(self): result.fill(iNaT) return type(self)(result, dtype=self.dtype, freq=None) - def _sub_nat(self): + def _sub_nat(self: DatetimeLikeArrayProtocol): """ Subtract pd.NaT from self """ @@ -1238,7 +1336,7 @@ def _sub_nat(self): result.fill(iNaT) return result.view("timedelta64[ns]") - def _sub_period_array(self, other): + def _sub_period_array(self: DatetimeLikeArrayProtocol, other): """ Subtract a Period Array/Index from self. This is only valid if self is itself a Period Array/Index, raises otherwise. Both objects must @@ -1274,7 +1372,7 @@ def _sub_period_array(self, other): new_values[mask] = NaT return new_values - def _addsub_object_array(self, other: np.ndarray, op): + def _addsub_object_array(self: DatetimeLikeArrayProtocol, other: np.ndarray, op): """ Add or subtract array-like of DateOffset objects @@ -1305,7 +1403,7 @@ def _addsub_object_array(self, other: np.ndarray, op): result = extract_array(result, extract_numpy=True).reshape(self.shape) return result - def _time_shift(self, periods, freq=None): + def _time_shift(self: DatetimeLikeArrayProtocol, periods, freq=None): """ Shift each value by `periods`. 
@@ -1343,7 +1441,7 @@ def _time_shift(self, periods, freq=None): return self._generate_range(start=start, end=end, periods=None, freq=self.freq) @unpack_zerodim_and_defer("__add__") - def __add__(self, other): + def __add__(self: DatetimeLikeArrayProtocol, other): # scalar others if other is NaT: @@ -1390,12 +1488,12 @@ def __add__(self, other): return TimedeltaArray(result) return result - def __radd__(self, other): + def __radd__(self: DatetimeLikeArrayProtocol, other): # alias for __add__ return self.__add__(other) @unpack_zerodim_and_defer("__sub__") - def __sub__(self, other): + def __sub__(self: DatetimeLikeArrayProtocol, other): # scalar others if other is NaT: @@ -1444,7 +1542,7 @@ def __sub__(self, other): return TimedeltaArray(result) return result - def __rsub__(self, other): + def __rsub__(self: DatetimeLikeArrayProtocol, other): if is_datetime64_any_dtype(other) and is_timedelta64_dtype(self.dtype): # ndarray[datetime64] cannot be subtracted from self, so # we need to wrap in DatetimeArray/Index and flip the operation @@ -1480,7 +1578,7 @@ def __rsub__(self, other): return -(self - other) - def __iadd__(self, other): # type: ignore + def __iadd__(self: DatetimeLikeArrayProtocol, other): # type: ignore result = self + other self[:] = result[:] @@ -1489,7 +1587,7 @@ def __iadd__(self, other): # type: ignore self._freq = result._freq return self - def __isub__(self, other): # type: ignore + def __isub__(self: DatetimeLikeArrayProtocol, other): # type: ignore result = self - other self[:] = result[:] @@ -1501,14 +1599,14 @@ def __isub__(self, other): # type: ignore # -------------------------------------------------------------- # Reductions - def _reduce(self, name, axis=0, skipna=True, **kwargs): + def _reduce(self: DatetimeLikeArrayProtocol, name, axis=0, skipna=True, **kwargs): op = getattr(self, name, None) if op: return op(skipna=skipna, **kwargs) else: return super()._reduce(name, skipna, **kwargs) - def min(self, axis=None, skipna=True, *args, 
**kwargs): + def min(self: DatetimeLikeArrayProtocol, axis=None, skipna=True, *args, **kwargs): """ Return the minimum value of the Array or minimum along an axis. @@ -1528,7 +1626,7 @@ def min(self, axis=None, skipna=True, *args, **kwargs): return NaT return self._box_func(result) - def max(self, axis=None, skipna=True, *args, **kwargs): + def max(self: DatetimeLikeArrayProtocol, axis=None, skipna=True, *args, **kwargs): """ Return the maximum value of the Array or maximum along an axis. @@ -1560,7 +1658,7 @@ def max(self, axis=None, skipna=True, *args, **kwargs): # Don't have to worry about NA `result`, since no NA went in. return self._box_func(result) - def mean(self, skipna=True): + def mean(self: DatetimeLikeArrayProtocol, skipna=True): """ Return the mean value of the Array. diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py index 363286704ba95..cbcc7668eb4ee 100644 --- a/pandas/core/groupby/base.py +++ b/pandas/core/groupby/base.py @@ -4,18 +4,41 @@ SeriesGroupBy and the DataFrameGroupBy objects. """ import collections +from typing import List + +from typing_extensions import Protocol from pandas.core.dtypes.common import is_list_like, is_scalar +from pandas._typing import FrameOrSeries + + OutputKey = collections.namedtuple("OutputKey", ["label", "position"]) +class Groupable(Protocol): + + # TODO: These probably shouldn't both be FrameOrSeries + def __init__(self, subset: FrameOrSeries, groupby: FrameOrSeries, parent: "Groupable", **kwargs): ... + + @property + def obj(self) -> FrameOrSeries: ... + + @property + def _attributes(self) -> List[str]: ... + + @property + def _groupby(self) -> FrameOrSeries: ... + + def _reset_cache(self) -> None: ... + + class GroupByMixin: """ Provide the groupby facilities to the mixed object. """ - def _gotitem(self, key, ndim, subset=None): + def _gotitem(self: Groupable, key, ndim, subset=None): """ Sub-classes to define. Return a sliced object. 
diff --git a/pandas/io/common.py b/pandas/io/common.py index 0fce8f5382686..9c6f729a3478a 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -497,9 +497,13 @@ def __init__( super().__init__(file, mode, zipfile.ZIP_DEFLATED, **kwargs) def write(self, data): - archive_name = self.filename if self.archive_name is not None: archive_name = self.archive_name + elif self.filename is not None: + archive_name = self.filename + else: + raise RuntimeError("No filename to write to!") + super().writestr(archive_name, data) @property diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 091f7662630ff..2213ac056cc5e 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -237,6 +237,11 @@ def _save_header(self): if not (has_aliases or self.header): return if has_aliases: + # TODO: type checking here is tricky because header is annotated as + # a Union[bool, Sequence[Hashable]] but we actually accept "sequence" types + # that don't inherit from abc.Sequence (ndarray, ABCIndex) + assert not isinstance(header, bool) + if len(header) != len(cols): raise ValueError( f"Writing {len(cols)} cols but got {len(header)} aliases" @@ -268,12 +273,15 @@ def _save_header(self): # given a string for a DF with Index index_label = [index_label] - encoded_labels = list(index_label) + # TODO: mismatch here because encoded_labels is a Sequence[str] + # but we fill with Sequence[Hashable]; need to clean up handling + # of non-None / non-str contained objects + encoded_labels = list(index_label) # type: ignore else: encoded_labels = [] if not has_mi_columns or has_aliases: - encoded_labels += list(write_cols) + encoded_labels += list(write_cols) # type: ignore writer.writerow(encoded_labels) else: # write out the mi diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index 5aab5b814bae7..f623aad72598a 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -202,6 +202,7 @@ class TestPDApi(Base): "_testing", 
"_tslib", "_typing", + "_typing_extensions", "_version", ] diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index 71d02db10c7ba..485ba898c6bbf 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -293,6 +293,8 @@ def decorate(func): allow_args = allowed_args else: spec = inspect.getfullargspec(func) + + assert spec.defaults is not None # TODO: this might be a bug allow_args = spec.args[: -len(spec.defaults)] @wraps(func)
Right now we have mypy 0.73 pinned in CI. However, newer versions will yield a lot of warnings like this: ```sh pandas/core/groupby/base.py:32: error: "GroupByMixin" has no attribute "obj" pandas/core/groupby/base.py:36: error: "GroupByMixin" has no attribute "_attributes" pandas/core/groupby/base.py:40: error: "GroupByMixin" has no attribute "_groupby" pandas/core/groupby/base.py:42: error: "GroupByMixin" has no attribute "_groupby" pandas/core/groupby/base.py:44: error: Too many arguments for "GroupByMixin" pandas/core/groupby/base.py:44: error: Unexpected keyword argument "groupby" for "GroupByMixin" pandas/core/groupby/base.py:44: error: Unexpected keyword argument "parent" for "GroupByMixin" pandas/core/groupby/base.py:45: error: "GroupByMixin" has no attribute "_reset_cache" ``` Most mixins we have will be problematic, if not all. Fortunately newer versions of mypy offer support for Protocols as a "self type", which were officially introduced in Python 3.8 This draft is an attempt at clarifying how that would work and seeing if it makes sense for the group. Depending on feedback, we may want to consider some kind of compat for this model to get it to work across Py36 and Py37 so our mypy doesn't lag too far behind More info on the mypy release notes: https://mypy-lang.blogspot.com/2019/11/mypy-0.html @simonjayhawkins
https://api.github.com/repos/pandas-dev/pandas/pulls/33274
2020-04-03T20:02:39Z
2020-04-22T04:11:29Z
null
2023-04-12T20:17:12Z
Pass method in __finalize__
diff --git a/pandas/core/base.py b/pandas/core/base.py index a28a2c9594341..5945d8a4b432d 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1521,4 +1521,4 @@ def duplicated(self, keep="first"): else: return self._constructor( duplicated(self, keep=keep), index=self.index - ).__finalize__(self) + ).__finalize__(self, method="duplicated") diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 67523facb7b7d..aedbba755227d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2515,7 +2515,7 @@ def transpose(self, *args, copy: bool = False) -> "DataFrame": new_values, index=self.columns, columns=self.index ) - return result.__finalize__(self) + return result.__finalize__(self, method="transpose") @property def T(self) -> "DataFrame": @@ -4470,7 +4470,7 @@ def _maybe_casted_values(index, labels=None): @Appender(_shared_docs["isna"] % _shared_doc_kwargs) def isna(self) -> "DataFrame": result = self._constructor(self._data.isna(func=isna)) - return result.__finalize__(self) + return result.__finalize__(self, method="isna") @Appender(_shared_docs["isna"] % _shared_doc_kwargs) def isnull(self) -> "DataFrame": @@ -4798,7 +4798,7 @@ def sort_values( if inplace: return self._update_inplace(result) else: - return result.__finalize__(self) + return result.__finalize__(self, method="sort_values") def sort_index( self, @@ -4934,7 +4934,7 @@ def sort_index( if inplace: return self._update_inplace(result) else: - return result.__finalize__(self) + return result.__finalize__(self, method="sort_index") def value_counts( self, diff --git a/pandas/core/generic.py b/pandas/core/generic.py index fac4ca6768ece..16725e72d5df2 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -590,7 +590,9 @@ def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries: if copy: new_values = new_values.copy() - return self._constructor(new_values, *new_axes).__finalize__(self) + return self._constructor(new_values, *new_axes).__finalize__( + 
self, method="swapaxes" + ) def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries: """ @@ -993,7 +995,7 @@ def rename( self._update_inplace(result) return None else: - return result.__finalize__(self) + return result.__finalize__(self, method="rename") @rewrite_axis_style_signature("mapper", [("copy", True), ("inplace", False)]) def rename_axis(self, mapper=lib.no_default, **kwargs): @@ -1357,7 +1359,7 @@ def __invert__(self): return self new_data = self._mgr.apply(operator.invert) - result = self._constructor(new_data).__finalize__(self) + result = self._constructor(new_data).__finalize__(self, method="__invert__") return result def __nonzero__(self): @@ -1802,7 +1804,9 @@ def __array_wrap__(self, result, context=None): # ptp also requires the item_from_zerodim return result d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False) - return self._constructor(result, **d).__finalize__(self) + return self._constructor(result, **d).__finalize__( + self, method="__array_wrap__" + ) # ideally we would define this to avoid the getattr checks, but # is slower @@ -3361,7 +3365,7 @@ class max_speed new_data = self._mgr.take( indices, axis=self._get_block_manager_axis(axis), verify=True ) - return self._constructor(new_data).__finalize__(self) + return self._constructor(new_data).__finalize__(self, method="take") def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries: """ @@ -4430,7 +4434,7 @@ def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries: # perform the reindex on the axes return self._reindex_axes( axes, level, limit, tolerance, method, fill_value, copy - ).__finalize__(self) + ).__finalize__(self, method="reindex") def _reindex_axes( self: FrameOrSeries, axes, level, limit, tolerance, method, fill_value, copy @@ -5129,7 +5133,7 @@ def pipe(self, func, *args, **kwargs): # Attribute access def __finalize__( - self: FrameOrSeries, other, method=None, **kwargs + self: FrameOrSeries, other, method: Optional[str] = 
None, **kwargs ) -> FrameOrSeries: """ Propagate metadata from other to self. @@ -5138,9 +5142,14 @@ def __finalize__( ---------- other : the object from which to get the attributes that we are going to propagate - method : optional, a passed method name ; possibly to take different - types of propagation actions based on this + method : str, optional + A passed method name providing context on where ``__finalize__`` + was called. + + .. warning: + The value passed as `method` are not currently considered + stable across pandas releases. """ if isinstance(other, NDFrame): for name in other.attrs: @@ -5293,10 +5302,10 @@ def _check_inplace_setting(self, value) -> bool_t: return True def _get_numeric_data(self): - return self._constructor(self._mgr.get_numeric_data()).__finalize__(self) + return self._constructor(self._mgr.get_numeric_data()).__finalize__(self,) def _get_bool_data(self): - return self._constructor(self._mgr.get_bool_data()).__finalize__(self) + return self._constructor(self._mgr.get_bool_data()).__finalize__(self,) # ---------------------------------------------------------------------- # Internal Interface Methods @@ -5562,8 +5571,8 @@ def astype( else: # else, only a single dtype is given - new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors) - return self._constructor(new_data).__finalize__(self) + new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors,) + return self._constructor(new_data).__finalize__(self, method="astype") # GH 19920: retain column metadata after concat result = pd.concat(results, axis=1, copy=False) @@ -5677,7 +5686,7 @@ def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries: """ data = self._mgr.copy(deep=deep) self._clear_item_cache() - return self._constructor(data).__finalize__(self) + return self._constructor(data).__finalize__(self, method="copy") def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries: return self.copy(deep=deep) @@ -5783,7 +5792,7 @@ def 
infer_objects(self: FrameOrSeries) -> FrameOrSeries: self._mgr.convert( datetime=True, numeric=False, timedelta=True, coerce=False, copy=True ) - ).__finalize__(self) + ).__finalize__(self, method="infer_objects") def convert_dtypes( self: FrameOrSeries, @@ -6110,7 +6119,7 @@ def fillna( if inplace: return self._update_inplace(result) else: - return result.__finalize__(self) + return result.__finalize__(self, method="fillna") def ffill( self: FrameOrSeries, @@ -6626,7 +6635,7 @@ def replace( if inplace: return self._update_inplace(result) else: - return result.__finalize__(self) + return result.__finalize__(self, method="replace") _shared_docs[ "interpolate" @@ -6892,7 +6901,7 @@ def interpolate( if inplace: return self._update_inplace(result) else: - return result.__finalize__(self) + return result.__finalize__(self, method="interpolate") # ---------------------------------------------------------------------- # Timeseries methods Methods @@ -7130,11 +7139,11 @@ def asof(self, where, subset=None): @Appender(_shared_docs["isna"] % _shared_doc_kwargs) def isna(self: FrameOrSeries) -> FrameOrSeries: - return isna(self).__finalize__(self) + return isna(self).__finalize__(self, method="isna") @Appender(_shared_docs["isna"] % _shared_doc_kwargs) def isnull(self: FrameOrSeries) -> FrameOrSeries: - return isna(self).__finalize__(self) + return isna(self).__finalize__(self, method="isnull") _shared_docs[ "notna" @@ -7200,11 +7209,11 @@ def isnull(self: FrameOrSeries) -> FrameOrSeries: @Appender(_shared_docs["notna"] % _shared_doc_kwargs) def notna(self: FrameOrSeries) -> FrameOrSeries: - return notna(self).__finalize__(self) + return notna(self).__finalize__(self, method="notna") @Appender(_shared_docs["notna"] % _shared_doc_kwargs) def notnull(self: FrameOrSeries) -> FrameOrSeries: - return notna(self).__finalize__(self) + return notna(self).__finalize__(self, method="notnull") def _clip_with_scalar(self, lower, upper, inplace: bool_t = False): if (lower is not None and 
np.any(isna(lower))) or ( @@ -8228,7 +8237,7 @@ def ranker(data): pct=pct, ) ranks = self._constructor(ranks, **data._construct_axes_dict()) - return ranks.__finalize__(self) + return ranks.__finalize__(self, method="rank") # if numeric_only is None, and we can't get anything, we try with # numeric_only=True @@ -8435,7 +8444,10 @@ def _align_frame( left.index = join_index right.index = join_index - return left.__finalize__(self), right.__finalize__(other) + return ( + left.__finalize__(self), + right.__finalize__(other), + ) def _align_series( self, @@ -8519,7 +8531,10 @@ def _align_series( left.index = join_index right.index = join_index - return left.__finalize__(self), right.__finalize__(other) + return ( + left.__finalize__(self), + right.__finalize__(other), + ) def _where( self, @@ -8932,7 +8947,7 @@ def shift( else: return self.tshift(periods, freq) - return self._constructor(new_data).__finalize__(self) + return self._constructor(new_data).__finalize__(self, method="shift") def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries: """ @@ -8969,7 +8984,7 @@ def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries: shifted_axis = self._get_axis(axis)[islicer] new_obj.set_axis(shifted_axis, axis=axis, inplace=True) - return new_obj.__finalize__(self) + return new_obj.__finalize__(self, method="slice_shift") def tshift( self: FrameOrSeries, periods: int = 1, freq=None, axis: Axis = 0 @@ -9029,7 +9044,7 @@ def tshift( result = self.copy() result.set_axis(new_ax, axis, inplace=True) - return result.__finalize__(self) + return result.__finalize__(self, method="tshift") def truncate( self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True @@ -9240,7 +9255,7 @@ def _tz_convert(ax, tz): result = self.copy(deep=copy) result = result.set_axis(ax, axis=axis, inplace=False) - return result.__finalize__(self) + return result.__finalize__(self, method="tz_convert") def tz_localize( self: FrameOrSeries, @@ 
-9409,7 +9424,7 @@ def _tz_localize(ax, tz, ambiguous, nonexistent): result = self.copy(deep=copy) result = result.set_axis(ax, axis=axis, inplace=False) - return result.__finalize__(self) + return result.__finalize__(self, method="tz_localize") # ---------------------------------------------------------------------- # Numeric Methods @@ -11188,7 +11203,7 @@ def block_accum_func(blk_values): d = self._construct_axes_dict() d["copy"] = False - return self._constructor(result, **d).__finalize__(self) + return self._constructor(result, **d).__finalize__(self, method=name) return set_function_name(cum_func, name, cls) diff --git a/pandas/core/series.py b/pandas/core/series.py index 5ed8241101925..ccb1ec25b5ba4 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -661,7 +661,7 @@ def view(self, dtype=None) -> "Series": """ return self._constructor( self._values.view(dtype), index=self.index - ).__finalize__(self) + ).__finalize__(self, method="view") # ---------------------------------------------------------------------- # NDArray Compat @@ -829,7 +829,7 @@ def take(self, indices, axis=0, is_copy=None, **kwargs) -> "Series": return self._constructor( new_values, index=new_index, fastpath=True - ).__finalize__(self) + ).__finalize__(self, method="take") def _take_with_is_copy(self, indices, axis=0): """ @@ -962,12 +962,12 @@ def _get_values_tuple(self, key): # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) return self._constructor(self._values[indexer], index=new_index).__finalize__( - self + self, ) def _get_values(self, indexer): try: - return self._constructor(self._mgr.get_slice(indexer)).__finalize__(self) + return self._constructor(self._mgr.get_slice(indexer)).__finalize__(self,) except ValueError: # mpl compat if we look up e.g. 
ser[:, np.newaxis]; # see tests.series.timeseries.test_mpl_compat_hack @@ -1181,7 +1181,9 @@ def repeat(self, repeats, axis=None) -> "Series": nv.validate_repeat(tuple(), dict(axis=axis)) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) - return self._constructor(new_values, index=new_index).__finalize__(self) + return self._constructor(new_values, index=new_index).__finalize__( + self, method="repeat" + ) def reset_index(self, level=None, drop=False, name=None, inplace=False): """ @@ -1308,7 +1310,7 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False): else: return self._constructor( self._values.copy(), index=new_index - ).__finalize__(self) + ).__finalize__(self, method="reset_index") elif inplace: raise TypeError( "Cannot reset_index inplace on a Series to create a DataFrame" @@ -1707,7 +1709,9 @@ def count(self, level=None): obs = level_codes[notna(self._values)] out = np.bincount(obs, minlength=len(lev) or None) - return self._constructor(out, index=lev, dtype="int64").__finalize__(self) + return self._constructor(out, index=lev, dtype="int64").__finalize__( + self, method="count" + ) def mode(self, dropna=True) -> "Series": """ @@ -2130,7 +2134,9 @@ def round(self, decimals=0, *args, **kwargs) -> "Series": """ nv.validate_round(args, kwargs) result = self._values.round(decimals) - result = self._constructor(result, index=self.index).__finalize__(self) + result = self._constructor(result, index=self.index).__finalize__( + self, method="round" + ) return result @@ -2352,7 +2358,9 @@ def diff(self, periods: int = 1) -> "Series": dtype: float64 """ result = algorithms.diff(self.array, periods) - return self._constructor(result, index=self.index).__finalize__(self) + return self._constructor(result, index=self.index).__finalize__( + self, method="diff" + ) def autocorr(self, lag=1) -> float: """ @@ -2469,7 +2477,7 @@ def dot(self, other): if isinstance(other, ABCDataFrame): return self._constructor( 
np.dot(lvals, rvals), index=other.columns - ).__finalize__(self) + ).__finalize__(self, method="dot") elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): @@ -2994,7 +3002,7 @@ def _try_kind_sort(arr): if inplace: self._update_inplace(result) else: - return result.__finalize__(self) + return result.__finalize__(self, method="sort_values") def sort_index( self, @@ -3172,7 +3180,7 @@ def sort_index( if inplace: self._update_inplace(result) else: - return result.__finalize__(self) + return result.__finalize__(self, method="sort_index") def argsort(self, axis=0, kind="quicksort", order=None) -> "Series": """ @@ -3206,11 +3214,13 @@ def argsort(self, axis=0, kind="quicksort", order=None) -> "Series": result = Series(-1, index=self.index, name=self.name, dtype="int64") notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) - return self._constructor(result, index=self.index).__finalize__(self) + return self._constructor(result, index=self.index).__finalize__( + self, method="argsort" + ) else: return self._constructor( np.argsort(values, kind=kind), index=self.index, dtype="int64" - ).__finalize__(self) + ).__finalize__(self, method="argsort") def nlargest(self, n=5, keep="first") -> "Series": """ @@ -3428,7 +3438,7 @@ def swaplevel(self, i=-2, j=-1, copy=True) -> "Series": assert isinstance(self.index, ABCMultiIndex) new_index = self.index.swaplevel(i, j) return self._constructor(self._values, index=new_index, copy=copy).__finalize__( - self + self, method="swaplevel" ) def reorder_levels(self, order) -> "Series": @@ -3632,7 +3642,9 @@ def map(self, arg, na_action=None) -> "Series": dtype: object """ new_values = super()._map_values(arg, na_action=na_action) - return self._constructor(new_values, index=self.index).__finalize__(self) + return self._constructor(new_values, index=self.index).__finalize__( + self, method="map" + ) def _gotitem(self, key, ndim, subset=None) -> "Series": """ @@ -3819,7 +3831,7 @@ 
def apply(self, func, convert_dtype=True, args=(), **kwds): """ if len(self) == 0: return self._constructor(dtype=self.dtype, index=self.index).__finalize__( - self + self, method="apply" ) # dispatch to agg @@ -3856,7 +3868,9 @@ def f(x): # so extension arrays can be used return self._constructor_expanddim(pd.array(mapped), index=self.index) else: - return self._constructor(mapped, index=self.index).__finalize__(self) + return self._constructor(mapped, index=self.index).__finalize__( + self, method="apply" + ) def _reduce( self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds @@ -4297,7 +4311,9 @@ def isin(self, values) -> "Series": Name: animal, dtype: bool """ result = algorithms.isin(self, values) - return self._constructor(result, index=self.index).__finalize__(self) + return self._constructor(result, index=self.index).__finalize__( + self, method="isin" + ) def between(self, left, right, inclusive=True) -> "Series": """ @@ -4533,7 +4549,9 @@ def to_timestamp(self, freq=None, how="start", copy=True) -> "Series": assert isinstance(self.index, (ABCDatetimeIndex, ABCPeriodIndex)) new_index = self.index.to_timestamp(freq=freq, how=how) - return self._constructor(new_values, index=new_index).__finalize__(self) + return self._constructor(new_values, index=new_index).__finalize__( + self, method="to_timestamp" + ) def to_period(self, freq=None, copy=True) -> "Series": """ @@ -4558,7 +4576,9 @@ def to_period(self, freq=None, copy=True) -> "Series": assert isinstance(self.index, ABCDatetimeIndex) new_index = self.index.to_period(freq=freq) - return self._constructor(new_values, index=new_index).__finalize__(self) + return self._constructor(new_values, index=new_index).__finalize__( + self, method="to_period" + ) # ---------------------------------------------------------------------- # Add index
This passes `method` everywhere we use `__finalize__`. I'm trying to get a better sense for 1. When we call finalize (and when we fail too) 2. When we finalize multiple times 3. The overhead of finalize I haven't called it anywhere new yet (followup PR with that coming though).
https://api.github.com/repos/pandas-dev/pandas/pulls/33273
2020-04-03T19:58:08Z
2020-04-06T22:04:41Z
2020-04-06T22:04:41Z
2020-04-06T22:04:50Z
CLN: Added static types _libs/algos
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 7a32b8957003e..6b6ead795584f 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -50,18 +50,17 @@ from pandas._libs.khash cimport ( import pandas._libs.missing as missing -cdef float64_t FP_ERR = 1e-13 - -cdef float64_t NaN = <float64_t>np.NaN - -cdef int64_t NPY_NAT = get_nat() +cdef: + float64_t FP_ERR = 1e-13 + float64_t NaN = <float64_t>np.NaN + int64_t NPY_NAT = get_nat() tiebreakers = { - 'average': TIEBREAK_AVERAGE, - 'min': TIEBREAK_MIN, - 'max': TIEBREAK_MAX, - 'first': TIEBREAK_FIRST, - 'dense': TIEBREAK_DENSE, + "average": TIEBREAK_AVERAGE, + "min": TIEBREAK_MIN, + "max": TIEBREAK_MAX, + "first": TIEBREAK_FIRST, + "dense": TIEBREAK_DENSE, } @@ -120,6 +119,7 @@ cpdef ndarray[int64_t, ndim=1] unique_deltas(const int64_t[:] arr): kh_int64_t *table int ret = 0 list uniques = [] + ndarray[int64_t, ndim=1] result table = kh_init_int64() kh_resize_int64(table, 10) @@ -261,7 +261,7 @@ def kth_smallest(numeric[:] a, Py_ssize_t k) -> numeric: @cython.boundscheck(False) @cython.wraparound(False) -def nancorr(const float64_t[:, :] mat, bint cov=0, minp=None): +def nancorr(const float64_t[:, :] mat, bint cov=False, minp=None): cdef: Py_ssize_t i, j, xi, yi, N, K bint minpv @@ -325,7 +325,7 @@ def nancorr(const float64_t[:, :] mat, bint cov=0, minp=None): @cython.boundscheck(False) @cython.wraparound(False) -def nancorr_spearman(const float64_t[:, :] mat, Py_ssize_t minp=1): +def nancorr_spearman(const float64_t[:, :] mat, Py_ssize_t minp=1) -> ndarray: cdef: Py_ssize_t i, j, xi, yi, N, K ndarray[float64_t, ndim=2] result @@ -581,7 +581,7 @@ D @cython.boundscheck(False) @cython.wraparound(False) -def backfill(ndarray[algos_t] old, ndarray[algos_t] new, limit=None): +def backfill(ndarray[algos_t] old, ndarray[algos_t] new, limit=None) -> ndarray: cdef: Py_ssize_t i, j, nleft, nright ndarray[int64_t, ndim=1] indexer @@ -810,18 +810,14 @@ def rank_1d( """ cdef: Py_ssize_t i, j, n, dups = 
0, total_tie_count = 0, non_na_idx = 0 - ndarray[rank_t] sorted_data, values - ndarray[float64_t] ranks ndarray[int64_t] argsorted ndarray[uint8_t, cast=True] sorted_mask - rank_t val, nan_value - float64_t sum_ranks = 0 int tiebreak = 0 - bint keep_na = 0 + bint keep_na = False bint isnan, condition float64_t count = 0.0 @@ -1034,19 +1030,14 @@ def rank_2d( """ cdef: Py_ssize_t i, j, z, k, n, dups = 0, total_tie_count = 0 - Py_ssize_t infs - ndarray[float64_t, ndim=2] ranks ndarray[rank_t, ndim=2] values - ndarray[int64_t, ndim=2] argsorted - rank_t val, nan_value - float64_t sum_ranks = 0 int tiebreak = 0 - bint keep_na = 0 + bint keep_na = False float64_t count = 0.0 bint condition, skip_condition
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33271
2020-04-03T19:31:02Z
2020-04-05T20:00:29Z
2020-04-05T20:00:29Z
2020-04-06T08:28:14Z
REF: dispatch TDBlock.to_native_types to TDA._format_native_types
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index a9c8977991740..8c93dca783113 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -389,7 +389,7 @@ def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): from pandas.io.formats.format import _get_format_timedelta64 formatter = _get_format_timedelta64(self._data, na_rep) - return np.array([formatter(x) for x in self._data]) + return np.array([formatter(x) for x in self._data.ravel()]).reshape(self.shape) # ---------------------------------------------------------------- # Arithmetic Methods diff --git a/pandas/core/construction.py b/pandas/core/construction.py index c9754ff588896..2d60ad9ba50bf 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -187,7 +187,7 @@ def array( >>> pd.array(["1H", "2H"], dtype='timedelta64[ns]') <TimedeltaArray> - ['01:00:00', '02:00:00'] + ['0 days 01:00:00', '0 days 02:00:00'] Length: 2, dtype: timedelta64[ns] Examples diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index 2908d468bcae0..d2cee5d94422c 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -241,9 +241,9 @@ class TimedeltaProperties(Properties): ... pd.timedelta_range(start="1 second", periods=3, freq="S") ... 
) >>> seconds_series - 0 00:00:01 - 1 00:00:02 - 2 00:00:03 + 0 0 days 00:00:01 + 1 0 days 00:00:02 + 2 0 days 00:00:03 dtype: timedelta64[ns] >>> seconds_series.dt.seconds 0 1 @@ -301,11 +301,11 @@ def components(self): -------- >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='s')) >>> s - 0 00:00:00 - 1 00:00:01 - 2 00:00:02 - 3 00:00:03 - 4 00:00:04 + 0 0 days 00:00:00 + 1 0 days 00:00:01 + 2 0 days 00:00:02 + 3 0 days 00:00:03 + 4 0 days 00:00:04 dtype: timedelta64[ns] >>> s.dt.components days hours minutes seconds milliseconds microseconds nanoseconds diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index d8b54fd5cffb3..fa53eeded0387 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2358,26 +2358,10 @@ def fillna(self, value, **kwargs): ) return super().fillna(value, **kwargs) - def to_native_types(self, na_rep=None, quoting=None, **kwargs): + def to_native_types(self, na_rep="NaT", **kwargs): """ convert to our native types format """ - values = self.values - mask = isna(values) - - rvalues = np.empty(values.shape, dtype=object) - if na_rep is None: - na_rep = "NaT" - rvalues[mask] = na_rep - imask = (~mask).ravel() - - # FIXME: - # should use the formats.format.Timedelta64Formatter here - # to figure what format to pass to the Timedelta - # e.g. 
to not show the decimals say - rvalues.flat[imask] = np.array( - [Timedelta(val)._repr_base(format="all") for val in values.ravel()[imask]], - dtype=object, - ) - return rvalues + tda = self.array_values() + return tda._format_native_types(na_rep, **kwargs) class BoolBlock(NumericBlock): diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index 960a82caafeeb..48f30acf269da 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -69,8 +69,8 @@ def to_timedelta(arg, unit="ns", errors="raise"): Converting numbers by specifying the `unit` keyword argument: >>> pd.to_timedelta(np.arange(5), unit='s') - TimedeltaIndex(['00:00:00', '00:00:01', '00:00:02', - '00:00:03', '00:00:04'], + TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02', + '0 days 00:00:03', '0 days 00:00:04'], dtype='timedelta64[ns]', freq=None) >>> pd.to_timedelta(np.arange(5), unit='d') TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index a9e668312d751..59542a8da535e 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1672,14 +1672,9 @@ def _get_format_timedelta64( even_days = ( np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0 ) - all_sub_day = ( - np.logical_and(consider_values, np.abs(values_int) >= one_day_nanos).sum() == 0 - ) if even_days: format = None - elif all_sub_day: - format = "sub_day" else: format = "long" diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 2cda4ba16f7ce..27ebee4aaaccf 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -248,12 +248,7 @@ def test_astype_str(self): { "a": list(map(str, map(lambda x: Timestamp(x)._date_repr, a._values))), "b": list(map(str, map(Timestamp, b._values))), - "c": list( - map( - str, - map(lambda x: Timedelta(x)._repr_base(format="all"), c._values), - ) - 
), + "c": list(map(lambda x: Timedelta(x)._repr_base(), c._values)), "d": list(map(str, d._values)), "e": list(map(str, e._values)), } diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 1a5d122d732a9..f3c3344992942 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -3003,13 +3003,13 @@ def test_days_neg(self): def test_subdays(self): y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit="s") result = fmt.Timedelta64Formatter(y, box=True).get_result() - assert result[0].strip() == "'00:00:00'" - assert result[1].strip() == "'00:00:01'" + assert result[0].strip() == "'0 days 00:00:00'" + assert result[1].strip() == "'0 days 00:00:01'" def test_subdays_neg(self): y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit="s") result = fmt.Timedelta64Formatter(-y, box=True).get_result() - assert result[0].strip() == "'00:00:00'" + assert result[0].strip() == "'0 days 00:00:00'" assert result[1].strip() == "'-1 days +23:59:59'" def test_zero(self): diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py index 2f2a663d559d0..05e708e575a64 100644 --- a/pandas/tests/series/test_dtypes.py +++ b/pandas/tests/series/test_dtypes.py @@ -132,7 +132,7 @@ def test_astype_str_map(self, dtype, series): expected = series.map(str) tm.assert_series_equal(result, expected) - def test_astype_str_cast(self): + def test_astype_str_cast_dt64(self): # see gh-9757 ts = Series([Timestamp("2010-01-04 00:00:00")]) s = ts.astype(str) @@ -146,11 +146,14 @@ def test_astype_str_cast(self): expected = Series([str("2010-01-04 00:00:00-05:00")]) tm.assert_series_equal(s, expected) + def test_astype_str_cast_td64(self): + # see gh-9757 + td = Series([Timedelta(1, unit="d")]) - s = td.astype(str) + ser = td.astype(str) - expected = Series([str("1 days 00:00:00.000000000")]) - tm.assert_series_equal(s, expected) + expected = Series([str("1 days")]) + tm.assert_series_equal(ser, expected) 
def test_astype_unicode(self): # see gh-7758: A bit of magic is required to set
https://api.github.com/repos/pandas-dev/pandas/pulls/33270
2020-04-03T19:18:02Z
2020-04-06T23:19:11Z
2020-04-06T23:19:11Z
2020-04-06T23:20:31Z
TYP: Fixed type annotaions in `scripts/validate_rst_title_capitalization`
diff --git a/scripts/validate_rst_title_capitalization.py b/scripts/validate_rst_title_capitalization.py index 59d422a1605a0..3d19e37ac7a1d 100755 --- a/scripts/validate_rst_title_capitalization.py +++ b/scripts/validate_rst_title_capitalization.py @@ -14,7 +14,7 @@ import os import re import sys -from typing import Generator, List, Tuple +from typing import Iterable, List, Tuple CAPITALIZATION_EXCEPTIONS = { "pandas", @@ -148,7 +148,7 @@ def correct_title_capitalization(title: str) -> str: return correct_title -def find_titles(rst_file: str) -> Generator[Tuple[str, int], None, None]: +def find_titles(rst_file: str) -> Iterable[Tuple[str, int]]: """ Algorithm to identify particular text that should be considered headings in an RST file. @@ -184,7 +184,7 @@ def find_titles(rst_file: str) -> Generator[Tuple[str, int], None, None]: previous_line = line -def find_rst_files(source_paths: List[str]) -> Generator[str, None, None]: +def find_rst_files(source_paths: List[str]) -> Iterable[str]: """ Given the command line arguments of directory paths, this method yields the strings of the .rst file directories that these paths contain. @@ -214,7 +214,7 @@ def find_rst_files(source_paths: List[str]) -> Generator[str, None, None]: yield filename -def main(source_paths: List[str], output_format: str) -> bool: +def main(source_paths: List[str], output_format: str) -> int: """ The main method to print all headings with incorrect capitalization.
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33268
2020-04-03T18:56:51Z
2020-04-04T21:35:35Z
2020-04-04T21:35:35Z
2020-04-06T08:26:58Z
TST: add DataFrame test for construct from tuple case from GH-32776
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 9f40e8c6931c8..fcdc62753ca0a 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1336,6 +1336,7 @@ def test_constructor_mixed_type_rows(self): (((), ()), [(), ()]), (((), ()), [[], []]), (([], []), [[], []]), + (([1], [2]), [[1], [2]]), # GH 32776 (([1, 2, 3], [4, 5, 6]), [[1, 2, 3], [4, 5, 6]]), ], )
- [x] closes #32776 - [x] tests added / passed - [x] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33267
2020-04-03T18:25:16Z
2020-04-04T21:07:10Z
2020-04-04T21:07:10Z
2020-04-05T02:22:47Z
DOC: Fixed examples in `pandas/core/window`
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 3e9138814fbdf..875ca259f69d9 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -322,6 +322,10 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then pytest -q --doctest-modules pandas/core/tools/ RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Doctests window' ; echo $MSG + pytest -q --doctest-modules pandas/core/window/ + RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Doctests tseries' ; echo $MSG pytest -q --doctest-modules pandas/tseries/ RET=$(($RET + $?)) ; echo $MSG "DONE" diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index 0ec876583dcde..2759280dc1d1c 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -167,33 +167,18 @@ def _constructor(self): """ Examples -------- - - >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C']) + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) >>> df - A B C - 0 -2.385977 -0.102758 0.438822 - 1 -1.004295 0.905829 -0.954544 - 2 0.735167 -0.165272 -1.619346 - 3 -0.702657 -1.340923 -0.706334 - 4 -0.246845 0.211596 -0.901819 - 5 2.463718 3.157577 -1.380906 - 6 -1.142255 2.340594 -0.039875 - 7 1.396598 -1.647453 1.677227 - 8 -0.543425 1.761277 -0.220481 - 9 -0.640505 0.289374 -1.550670 + A B C + 0 1 4 7 + 1 2 5 8 + 2 3 6 9 >>> df.ewm(alpha=0.5).mean() A B C - 0 -2.385977 -0.102758 0.438822 - 1 -1.464856 0.569633 -0.490089 - 2 -0.207700 0.149687 -1.135379 - 3 -0.471677 -0.645305 -0.906555 - 4 -0.355635 -0.203033 -0.904111 - 5 1.076417 1.503943 -1.146293 - 6 -0.041654 1.925562 -0.588728 - 7 0.680292 0.132049 0.548693 - 8 0.067236 0.948257 0.163353 - 9 -0.286980 0.618493 -0.694496 + 0 1.000000 4.000000 7.000000 + 1 1.666667 4.666667 7.666667 + 2 2.428571 5.428571 8.428571 """ ) diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py index 140e0144d0a2d..146c139806bca 100644 --- a/pandas/core/window/expanding.py +++ b/pandas/core/window/expanding.py @@ -37,7 +37,8 @@ class 
Expanding(_Rolling_and_Expanding): Examples -------- - >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) + >>> df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]}) + >>> df B 0 0.0 1 1.0 @@ -98,33 +99,18 @@ def _get_window(self, other=None, **kwargs): """ Examples -------- - - >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C']) + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) >>> df - A B C - 0 -2.385977 -0.102758 0.438822 - 1 -1.004295 0.905829 -0.954544 - 2 0.735167 -0.165272 -1.619346 - 3 -0.702657 -1.340923 -0.706334 - 4 -0.246845 0.211596 -0.901819 - 5 2.463718 3.157577 -1.380906 - 6 -1.142255 2.340594 -0.039875 - 7 1.396598 -1.647453 1.677227 - 8 -0.543425 1.761277 -0.220481 - 9 -0.640505 0.289374 -1.550670 + A B C + 0 1 4 7 + 1 2 5 8 + 2 3 6 9 >>> df.ewm(alpha=0.5).mean() A B C - 0 -2.385977 -0.102758 0.438822 - 1 -1.464856 0.569633 -0.490089 - 2 -0.207700 0.149687 -1.135379 - 3 -0.471677 -0.645305 -0.906555 - 4 -0.355635 -0.203033 -0.904111 - 5 1.076417 1.503943 -1.146293 - 6 -0.041654 1.925562 -0.588728 - 7 0.680292 0.132049 0.548693 - 8 0.067236 0.948257 0.163353 - 9 -0.286980 0.618493 -0.694496 + 0 1.000000 4.000000 7.000000 + 1 1.666667 4.666667 7.666667 + 2 2.428571 5.428571 8.428571 """ ) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index dc8cf839d0bcb..729e4069b1309 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -1039,33 +1039,18 @@ def _get_window( """ Examples -------- - - >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C']) + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) >>> df - A B C - 0 -2.385977 -0.102758 0.438822 - 1 -1.004295 0.905829 -0.954544 - 2 0.735167 -0.165272 -1.619346 - 3 -0.702657 -1.340923 -0.706334 - 4 -0.246845 0.211596 -0.901819 - 5 2.463718 3.157577 -1.380906 - 6 -1.142255 2.340594 -0.039875 - 7 1.396598 -1.647453 1.677227 - 8 -0.543425 1.761277 -0.220481 - 9 -0.640505 0.289374 
-1.550670 - - >>> df.rolling(3, win_type='boxcar').agg('mean') - A B C - 0 NaN NaN NaN - 1 NaN NaN NaN - 2 -0.885035 0.212600 -0.711689 - 3 -0.323928 -0.200122 -1.093408 - 4 -0.071445 -0.431533 -1.075833 - 5 0.504739 0.676083 -0.996353 - 6 0.358206 1.903256 -0.774200 - 7 0.906020 1.283573 0.085482 - 8 -0.096361 0.818139 0.472290 - 9 0.070889 0.134399 -0.031308 + A B C + 0 1 4 7 + 1 2 5 8 + 2 3 6 9 + + >>> df.rolling(2, win_type="boxcar").agg("mean") + A B C + 0 NaN NaN NaN + 1 1.5 4.5 7.5 + 2 2.5 5.5 8.5 """ ) @@ -1904,46 +1889,24 @@ def _validate_freq(self): """ Examples -------- - - >>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C']) + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) >>> df - A B C - 0 -2.385977 -0.102758 0.438822 - 1 -1.004295 0.905829 -0.954544 - 2 0.735167 -0.165272 -1.619346 - 3 -0.702657 -1.340923 -0.706334 - 4 -0.246845 0.211596 -0.901819 - 5 2.463718 3.157577 -1.380906 - 6 -1.142255 2.340594 -0.039875 - 7 1.396598 -1.647453 1.677227 - 8 -0.543425 1.761277 -0.220481 - 9 -0.640505 0.289374 -1.550670 + A B C + 0 1 4 7 + 1 2 5 8 + 2 3 6 9 - >>> df.rolling(3).sum() - A B C - 0 NaN NaN NaN - 1 NaN NaN NaN - 2 -2.655105 0.637799 -2.135068 - 3 -0.971785 -0.600366 -3.280224 - 4 -0.214334 -1.294599 -3.227500 - 5 1.514216 2.028250 -2.989060 - 6 1.074618 5.709767 -2.322600 - 7 2.718061 3.850718 0.256446 - 8 -0.289082 2.454418 1.416871 - 9 0.212668 0.403198 -0.093924 - - >>> df.rolling(3).agg({'A':'sum', 'B':'min'}) - A B - 0 NaN NaN - 1 NaN NaN - 2 -2.655105 -0.165272 - 3 -0.971785 -1.340923 - 4 -0.214334 -1.340923 - 5 1.514216 -1.340923 - 6 1.074618 0.211596 - 7 2.718061 -1.647453 - 8 -0.289082 -1.647453 - 9 0.212668 -1.647453 + >>> df.rolling(2).sum() + A B C + 0 NaN NaN NaN + 1 3.0 9.0 15.0 + 2 5.0 11.0 17.0 + + >>> df.rolling(2).agg({"A": "sum", "B": "min"}) + A B + 0 NaN NaN + 1 3.0 4.0 + 2 5.0 5.0 """ )
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33266
2020-04-03T18:10:47Z
2020-04-04T21:10:00Z
2020-04-04T21:10:00Z
2020-04-06T08:34:41Z
TST: add date range test for reindex case from GH-32740
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 1529a259c49af..e109c7a4f1c8d 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -1,4 +1,4 @@ -from datetime import date +from datetime import date, timedelta import dateutil import numpy as np @@ -44,6 +44,45 @@ def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self): assert str(index.reindex([])[0].tz) == "US/Eastern" assert str(index.reindex(np.array([]))[0].tz) == "US/Eastern" + def test_reindex_with_same_tz(self): + # GH 32740 + rng_a = date_range("2010-01-01", "2010-01-02", periods=24, tz="utc") + rng_b = date_range("2010-01-01", "2010-01-02", periods=23, tz="utc") + result1, result2 = rng_a.reindex( + rng_b, method="nearest", tolerance=timedelta(seconds=20) + ) + expected_list1 = [ + "2010-01-01 00:00:00", + "2010-01-01 01:05:27.272727272", + "2010-01-01 02:10:54.545454545", + "2010-01-01 03:16:21.818181818", + "2010-01-01 04:21:49.090909090", + "2010-01-01 05:27:16.363636363", + "2010-01-01 06:32:43.636363636", + "2010-01-01 07:38:10.909090909", + "2010-01-01 08:43:38.181818181", + "2010-01-01 09:49:05.454545454", + "2010-01-01 10:54:32.727272727", + "2010-01-01 12:00:00", + "2010-01-01 13:05:27.272727272", + "2010-01-01 14:10:54.545454545", + "2010-01-01 15:16:21.818181818", + "2010-01-01 16:21:49.090909090", + "2010-01-01 17:27:16.363636363", + "2010-01-01 18:32:43.636363636", + "2010-01-01 19:38:10.909090909", + "2010-01-01 20:43:38.181818181", + "2010-01-01 21:49:05.454545454", + "2010-01-01 22:54:32.727272727", + "2010-01-02 00:00:00", + ] + expected1 = DatetimeIndex( + expected_list1, dtype="datetime64[ns, UTC]", freq=None, + ) + expected2 = np.array([0] + [-1] * 21 + [23], dtype=np.int64,) + tm.assert_index_equal(result1, expected1) + tm.assert_numpy_array_equal(result2, expected2) + def test_time_loc(self): # GH8667 from datetime import time from 
pandas._libs.index import _SIZE_CUTOFF
- [x] closes #32740 - [x] tests added / passed - [x] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33265
2020-04-03T17:48:39Z
2020-04-05T18:28:17Z
2020-04-05T18:28:17Z
2020-04-05T18:28:26Z
DOC: Fixed examples in `pandas/core/aggregation.py`
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 0454150f61045..8901efad56f79 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -272,6 +272,10 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then pytest -q --doctest-modules pandas/core/accessor.py RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Doctests aggregation.py' ; echo $MSG + pytest -q --doctest-modules pandas/core/aggregation.py + RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Doctests base.py' ; echo $MSG pytest -q --doctest-modules pandas/core/base.py RET=$(($RET + $?)) ; echo $MSG "DONE" diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py index 448f84d58d7a0..f6380808d5ac2 100644 --- a/pandas/core/aggregation.py +++ b/pandas/core/aggregation.py @@ -27,10 +27,9 @@ def is_multi_agg_with_relabel(**kwargs) -> bool: Examples -------- - >>> is_multi_agg_with_relabel(a='max') + >>> is_multi_agg_with_relabel(a="max") False - >>> is_multi_agg_with_relabel(a_max=('a', 'max'), - ... a_min=('a', 'min')) + >>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min")) True >>> is_multi_agg_with_relabel() False @@ -61,8 +60,8 @@ def normalize_keyword_aggregation(kwargs: dict) -> Tuple[dict, List[str], List[i Examples -------- - >>> normalize_keyword_aggregation({'output': ('input', 'sum')}) - ({'input': ['sum']}, ('output',), [('input', 'sum')]) + >>> normalize_keyword_aggregation({"output": ("input", "sum")}) + (defaultdict(<class 'list'>, {'input': ['sum']}), ('output',), array([0])) """ # Normalize the aggregation functions as Mapping[column, List[func]], # process normally, then fixup the names.
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/33263
2020-04-03T16:28:45Z
2020-04-07T17:53:06Z
2020-04-07T17:53:06Z
2020-04-07T18:49:39Z
TST: don't assert that matplotlib rejects shorthand hex colors
diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 08b33ee547a48..4a9efe9554c6e 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -2041,12 +2041,6 @@ def test_line_colors(self): self._check_colors(ax.get_lines(), linecolors=custom_colors) tm.close() - with pytest.raises(ValueError): - # Color contains shorthand hex value results in ValueError - custom_colors = ["#F00", "#00F", "#FF0", "#000", "#FFF"] - # Forced show plot - _check_plot_works(df.plot, color=custom_colors) - @pytest.mark.slow def test_dont_modify_colors(self): colors = ["r", "g", "b"] @@ -2098,14 +2092,6 @@ def test_line_colors_and_styles_subplots(self): self._check_colors(ax.get_lines(), linecolors=[c]) tm.close() - with pytest.raises(ValueError): - # Color contains shorthand hex value results in ValueError - custom_colors = ["#F00", "#00F", "#FF0", "#000", "#FFF"] - # Forced show plot - # _check_plot_works adds an ax so catch warning. see GH #13188 - with tm.assert_produces_warning(UserWarning): - _check_plot_works(df.plot, color=custom_colors, subplots=True) - rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] for cmap in ["jet", cm.jet]: axes = df.plot(colormap=cmap, subplots=True)
From 3.2 [it accepts them](https://matplotlib.org/users/prev_whats_new/whats_new_3.2.0.html#digit-and-4-digit-hex-colors). [Test failure log](https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=954647) (from Debian's pandas 0.25, so it includes other issues we no longer have - this one is DID NOT RAISE).
https://api.github.com/repos/pandas-dev/pandas/pulls/33262
2020-04-03T16:09:29Z
2020-04-03T19:14:24Z
2020-04-03T19:14:24Z
2020-04-03T19:14:34Z
PERF: masked ops for reductions (min/max)
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 19e8acdaa7384..d232b68e3c450 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -274,7 +274,7 @@ Performance improvements sparse values from ``scipy.sparse`` matrices using the :meth:`DataFrame.sparse.from_spmatrix` constructor (:issue:`32821`, :issue:`32825`, :issue:`32826`, :issue:`32856`, :issue:`32858`). -- Performance improvement in :meth:`Series.sum` for nullable (integer and boolean) dtypes (:issue:`30982`). +- Performance improvement in reductions (sum, min, max) for nullable (integer and boolean) dtypes (:issue:`30982`, :issue:`33261`). .. --------------------------------------------------------------------------- diff --git a/pandas/core/array_algos/masked_reductions.py b/pandas/core/array_algos/masked_reductions.py index 0fb2605b554c2..b3723340cefd6 100644 --- a/pandas/core/array_algos/masked_reductions.py +++ b/pandas/core/array_algos/masked_reductions.py @@ -45,3 +45,44 @@ def sum( return np.sum(values[~mask]) else: return np.sum(values, where=~mask) + + +def _minmax(func, values: np.ndarray, mask: np.ndarray, skipna: bool = True): + """ + Reduction for 1D masked array. + + Parameters + ---------- + func : np.min or np.max + values : np.ndarray + Numpy array with the values (can be of any dtype that support the + operation). + mask : np.ndarray + Boolean numpy array (True values indicate missing values). + skipna : bool, default True + Whether to skip NA. 
+ """ + if not skipna: + if mask.any(): + return libmissing.NA + else: + if values.size: + return func(values) + else: + # min/max with empty array raise in numpy, pandas returns NA + return libmissing.NA + else: + subset = values[~mask] + if subset.size: + return func(values[~mask]) + else: + # min/max with empty array raise in numpy, pandas returns NA + return libmissing.NA + + +def min(values: np.ndarray, mask: np.ndarray, skipna: bool = True): + return _minmax(np.min, values=values, mask=mask, skipna=skipna) + + +def max(values: np.ndarray, mask: np.ndarray, skipna: bool = True): + return _minmax(np.max, values=values, mask=mask, skipna=skipna) diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py index 442d4ca8cef6d..e85534def6b97 100644 --- a/pandas/core/arrays/boolean.py +++ b/pandas/core/arrays/boolean.py @@ -696,8 +696,9 @@ def _reduce(self, name: str, skipna: bool = True, **kwargs): data = self._data mask = self._mask - if name == "sum": - return masked_reductions.sum(data, mask, skipna=skipna, **kwargs) + if name in {"sum", "min", "max"}: + op = getattr(masked_reductions, name) + return op(data, mask, skipna=skipna, **kwargs) # coerce to a nan-aware float if needed if self._hasna: @@ -715,9 +716,6 @@ def _reduce(self, name: str, skipna: bool = True, **kwargs): if int_result == result: result = int_result - elif name in ["min", "max"] and notna(result): - result = np.bool_(result) - return result def _maybe_mask_result(self, result, mask, other, op_name: str): diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index 4f3c68aa03b16..541ecdeb45830 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -561,8 +561,9 @@ def _reduce(self, name: str, skipna: bool = True, **kwargs): data = self._data mask = self._mask - if name == "sum": - return masked_reductions.sum(data, mask, skipna=skipna, **kwargs) + if name in {"sum", "min", "max"}: + op = getattr(masked_reductions, name) + return 
op(data, mask, skipna=skipna, **kwargs) # coerce to a nan-aware float if needed # (we explicitly use NaN within reductions) @@ -581,7 +582,7 @@ def _reduce(self, name: str, skipna: bool = True, **kwargs): # if we have a preservable numeric op, # provide coercion back to an integer type if possible - elif name in ["min", "max", "prod"]: + elif name == "prod": # GH#31409 more performant than casting-then-checking result = com.cast_scalar_indexer(result) diff --git a/pandas/tests/arrays/integer/test_dtypes.py b/pandas/tests/arrays/integer/test_dtypes.py index ee1ec86745246..515013e95c717 100644 --- a/pandas/tests/arrays/integer/test_dtypes.py +++ b/pandas/tests/arrays/integer/test_dtypes.py @@ -34,7 +34,7 @@ def test_preserve_dtypes(op): # op result = getattr(df.C, op)() - if op == "sum": + if op in {"sum", "min", "max"}: assert isinstance(result, np.int64) else: assert isinstance(result, int) diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py index 962b105d1e8fc..8fb035e085d40 100644 --- a/pandas/tests/reductions/test_reductions.py +++ b/pandas/tests/reductions/test_reductions.py @@ -65,27 +65,58 @@ def test_ops(self, opname, obj): assert result.value == expected @pytest.mark.parametrize("opname", ["max", "min"]) - def test_nanops(self, opname, index_or_series): + @pytest.mark.parametrize( + "dtype, val", + [ + ("object", 2.0), + ("float64", 2.0), + ("datetime64[ns]", datetime(2011, 11, 1)), + ("Int64", 2), + ("boolean", True), + ], + ) + def test_nanminmax(self, opname, dtype, val, index_or_series): # GH#7261 klass = index_or_series - arg_op = "arg" + opname if klass is Index else "idx" + opname - obj = klass([np.nan, 2.0]) - assert getattr(obj, opname)() == 2.0 + if dtype in ["Int64", "boolean"] and klass == pd.Index: + pytest.skip("EAs can't yet be stored in an index") - obj = klass([np.nan]) - assert pd.isna(getattr(obj, opname)()) - assert pd.isna(getattr(obj, opname)(skipna=False)) + def check_missing(res): + if 
dtype == "datetime64[ns]": + return res is pd.NaT + elif dtype == "Int64": + return res is pd.NA + else: + return pd.isna(res) - obj = klass([], dtype=object) - assert pd.isna(getattr(obj, opname)()) - assert pd.isna(getattr(obj, opname)(skipna=False)) + obj = klass([None], dtype=dtype) + assert check_missing(getattr(obj, opname)()) + assert check_missing(getattr(obj, opname)(skipna=False)) - obj = klass([pd.NaT, datetime(2011, 11, 1)]) - # check DatetimeIndex monotonic path - assert getattr(obj, opname)() == datetime(2011, 11, 1) - assert getattr(obj, opname)(skipna=False) is pd.NaT + obj = klass([], dtype=dtype) + assert check_missing(getattr(obj, opname)()) + assert check_missing(getattr(obj, opname)(skipna=False)) + + if dtype == "object": + # generic test with object only works for empty / all NaN + return + + obj = klass([None, val], dtype=dtype) + assert getattr(obj, opname)() == val + assert check_missing(getattr(obj, opname)(skipna=False)) + obj = klass([None, val, None], dtype=dtype) + assert getattr(obj, opname)() == val + assert check_missing(getattr(obj, opname)(skipna=False)) + + @pytest.mark.parametrize("opname", ["max", "min"]) + def test_nanargminmax(self, opname, index_or_series): + # GH#7261 + klass = index_or_series + arg_op = "arg" + opname if klass is Index else "idx" + opname + + obj = klass([pd.NaT, datetime(2011, 11, 1)]) assert getattr(obj, arg_op)() == 1 result = getattr(obj, arg_op)(skipna=False) if klass is Series: @@ -95,9 +126,6 @@ def test_nanops(self, opname, index_or_series): obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT]) # check DatetimeIndex non-monotonic path - assert getattr(obj, opname)(), datetime(2011, 11, 1) - assert getattr(obj, opname)(skipna=False) is pd.NaT - assert getattr(obj, arg_op)() == 1 result = getattr(obj, arg_op)(skipna=False) if klass is Series:
Follow-up on https://github.com/pandas-dev/pandas/pull/30982, adding similar mask reduction but now for min/max.
https://api.github.com/repos/pandas-dev/pandas/pulls/33261
2020-04-03T15:04:06Z
2020-04-06T23:28:01Z
2020-04-06T23:28:00Z
2020-04-07T06:57:46Z
DOC: Fixed examples in `pandas/core/accessor.py`
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index e987e147fa343..5aecfa543783c 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -268,6 +268,10 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then # Individual files + MSG='Doctests accessor.py' ; echo $MSG + pytest -q --doctest-modules pandas/core/accessor.py + RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Doctests base.py' ; echo $MSG pytest -q --doctest-modules pandas/core/base.py RET=$(($RET + $?)) ; echo $MSG "DONE" diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index b6ca19bde8009..f970cefe15527 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -257,12 +257,13 @@ def plot(self): Back in an interactive IPython session: - >>> ds = pd.DataFrame({{'longitude': np.linspace(0, 10), - ... 'latitude': np.linspace(0, 20)}}) - >>> ds.geo.center - (5.0, 10.0) - >>> ds.geo.plot() - # plots data on a map + .. code-block:: ipython + + In [1]: ds = pd.DataFrame({{"longitude": np.linspace(0, 10), + ...: "latitude": np.linspace(0, 20)}}) + In [2]: ds.geo.center + Out[2]: (5.0, 10.0) + In [3]: ds.geo.plot() # plots data on a map """ def decorator(accessor):
Since these changes require an interactive IPython shell (I think), we can't check them from pytest. This is what the docs look like: Before: ![Screenshot_20200403_170159](https://user-images.githubusercontent.com/50263213/78368644-9c0b8400-75cc-11ea-9061-24652f2bdfca.png) After: ![register_acc](https://user-images.githubusercontent.com/50263213/78368530-71b9c680-75cc-11ea-8ab5-d2ee2eadc727.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/33260
2020-04-03T14:01:04Z
2020-04-04T15:29:08Z
2020-04-04T15:29:08Z
2020-04-06T08:35:11Z
DOC: Added an example for each series.dt field accessor
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index be2ac8c22bc8a..b9f9edcebad5b 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1239,6 +1239,22 @@ def date(self): "Y", """ The year of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="Y") + ... ) + >>> datetime_series + 0 2000-12-31 + 1 2001-12-31 + 2 2002-12-31 + dtype: datetime64[ns] + >>> datetime_series.dt.year + 0 2000 + 1 2001 + 2 2002 + dtype: int64 """, ) month = _field_accessor( @@ -1246,6 +1262,22 @@ def date(self): "M", """ The month as January=1, December=12. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="M") + ... ) + >>> datetime_series + 0 2000-01-31 + 1 2000-02-29 + 2 2000-03-31 + dtype: datetime64[ns] + >>> datetime_series.dt.month + 0 1 + 1 2 + 2 3 + dtype: int64 """, ) day = _field_accessor( @@ -1253,6 +1285,22 @@ def date(self): "D", """ The day of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="D") + ... ) + >>> datetime_series + 0 2000-01-01 + 1 2000-01-02 + 2 2000-01-03 + dtype: datetime64[ns] + >>> datetime_series.dt.day + 0 1 + 1 2 + 2 3 + dtype: int64 """, ) hour = _field_accessor( @@ -1260,6 +1308,22 @@ def date(self): "h", """ The hours of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="h") + ... ) + >>> datetime_series + 0 2000-01-01 00:00:00 + 1 2000-01-01 01:00:00 + 2 2000-01-01 02:00:00 + dtype: datetime64[ns] + >>> datetime_series.dt.hour + 0 0 + 1 1 + 2 2 + dtype: int64 """, ) minute = _field_accessor( @@ -1267,6 +1331,22 @@ def date(self): "m", """ The minutes of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="T") + ... 
) + >>> datetime_series + 0 2000-01-01 00:00:00 + 1 2000-01-01 00:01:00 + 2 2000-01-01 00:02:00 + dtype: datetime64[ns] + >>> datetime_series.dt.minute + 0 0 + 1 1 + 2 2 + dtype: int64 """, ) second = _field_accessor( @@ -1274,6 +1354,22 @@ def date(self): "s", """ The seconds of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="s") + ... ) + >>> datetime_series + 0 2000-01-01 00:00:00 + 1 2000-01-01 00:00:01 + 2 2000-01-01 00:00:02 + dtype: datetime64[ns] + >>> datetime_series.dt.second + 0 0 + 1 1 + 2 2 + dtype: int64 """, ) microsecond = _field_accessor( @@ -1281,6 +1377,22 @@ def date(self): "us", """ The microseconds of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="us") + ... ) + >>> datetime_series + 0 2000-01-01 00:00:00.000000 + 1 2000-01-01 00:00:00.000001 + 2 2000-01-01 00:00:00.000002 + dtype: datetime64[ns] + >>> datetime_series.dt.microsecond + 0 0 + 1 1 + 2 2 + dtype: int64 """, ) nanosecond = _field_accessor( @@ -1288,6 +1400,22 @@ def date(self): "ns", """ The nanoseconds of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="ns") + ... ) + >>> datetime_series + 0 2000-01-01 00:00:00.000000000 + 1 2000-01-01 00:00:00.000000001 + 2 2000-01-01 00:00:00.000000002 + dtype: datetime64[ns] + >>> datetime_series.dt.nanosecond + 0 0 + 1 1 + 2 2 + dtype: int64 """, ) weekofyear = _field_accessor(
- [x] xref https://github.com/pandas-dev/pandas/pull/33208#discussion_r402189839 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry --- I can also add examples that would cover ```TimedeltaProperties``` and ```PeriodProperties```, but I think that would be overly verbose, wdyt?
https://api.github.com/repos/pandas-dev/pandas/pulls/33259
2020-04-03T12:35:53Z
2020-04-06T22:10:19Z
2020-04-06T22:10:19Z
2020-04-07T10:48:22Z
CLN: Reorganized doctests order
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 067d88a666bb3..e987e147fa343 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -266,57 +266,62 @@ fi ### DOCTESTS ### if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then + # Individual files + + MSG='Doctests base.py' ; echo $MSG + pytest -q --doctest-modules pandas/core/base.py + RET=$(($RET + $?)) ; echo $MSG "DONE" + + MSG='Doctests construction.py' ; echo $MSG + pytest -q --doctest-modules pandas/core/construction.py + RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Doctests frame.py' ; echo $MSG pytest -q --doctest-modules pandas/core/frame.py RET=$(($RET + $?)) ; echo $MSG "DONE" - MSG='Doctests series.py' ; echo $MSG - pytest -q --doctest-modules pandas/core/series.py + MSG='Doctests generic.py' ; echo $MSG + pytest -q --doctest-modules pandas/core/generic.py RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Doctests groupby.py' ; echo $MSG pytest -q --doctest-modules pandas/core/groupby/groupby.py -k"-cumcount -describe -pipe" RET=$(($RET + $?)) ; echo $MSG "DONE" - MSG='Doctests indexes' ; echo $MSG - pytest -q --doctest-modules pandas/core/indexes/ + MSG='Doctests series.py' ; echo $MSG + pytest -q --doctest-modules pandas/core/series.py RET=$(($RET + $?)) ; echo $MSG "DONE" - MSG='Doctests tools' ; echo $MSG - pytest -q --doctest-modules pandas/core/tools/ - RET=$(($RET + $?)) ; echo $MSG "DONE" - - MSG='Doctests reshaping functions' ; echo $MSG - pytest -q --doctest-modules pandas/core/reshape/ - RET=$(($RET + $?)) ; echo $MSG "DONE" + # Directories MSG='Doctests arrays'; echo $MSG pytest -q --doctest-modules pandas/core/arrays/ RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Doctests computation' ; echo $MSG + pytest -q --doctest-modules pandas/core/computation/ + RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Doctests dtypes'; echo $MSG pytest -q --doctest-modules pandas/core/dtypes/ RET=$(($RET + $?)) ; echo $MSG "DONE" - MSG='Doctests base.py' ; echo $MSG - pytest -q --doctest-modules 
pandas/core/base.py + MSG='Doctests indexes' ; echo $MSG + pytest -q --doctest-modules pandas/core/indexes/ RET=$(($RET + $?)) ; echo $MSG "DONE" - MSG='Doctests construction.py' ; echo $MSG - pytest -q --doctest-modules pandas/core/construction.py + MSG='Doctests reshaping functions' ; echo $MSG + pytest -q --doctest-modules pandas/core/reshape/ RET=$(($RET + $?)) ; echo $MSG "DONE" - MSG='Doctests generic.py' ; echo $MSG - pytest -q --doctest-modules pandas/core/generic.py + MSG='Doctests tools' ; echo $MSG + pytest -q --doctest-modules pandas/core/tools/ RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Doctests tseries' ; echo $MSG pytest -q --doctest-modules pandas/tseries/ RET=$(($RET + $?)) ; echo $MSG "DONE" - MSG='Doctests computation' ; echo $MSG - pytest -q --doctest-modules pandas/core/computation/ - RET=$(($RET + $?)) ; echo $MSG "DONE" fi ### DOCSTRINGS ###
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry --- This does not change anything, just moves things around, to make it more manageable (IMO)
https://api.github.com/repos/pandas-dev/pandas/pulls/33257
2020-04-03T11:10:20Z
2020-04-03T13:08:38Z
2020-04-03T13:08:38Z
2020-04-03T14:01:48Z
INT: provide helpers for accessing the values of DataFrame columns
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index c4471dacfce9c..cad954c1681c1 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -23,6 +23,7 @@ FrozenSet, Hashable, Iterable, + Iterator, List, Optional, Sequence, @@ -40,7 +41,16 @@ from pandas._config import get_option from pandas._libs import algos as libalgos, lib, properties -from pandas._typing import Axes, Axis, Dtype, FilePathOrBuffer, Label, Level, Renamer +from pandas._typing import ( + ArrayLike, + Axes, + Axis, + Dtype, + FilePathOrBuffer, + Label, + Level, + Renamer, +) from pandas.compat import PY37 from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv @@ -2573,6 +2583,21 @@ def _ixs(self, i: int, axis: int = 0): return result + def _get_column_array(self, i: int) -> ArrayLike: + """ + Get the values of the i'th column (ndarray or ExtensionArray, as stored + in the Block) + """ + return self._data.iget_values(i) + + def _iter_column_arrays(self) -> Iterator[ArrayLike]: + """ + Iterate over the arrays of all columns in order. + This returns the values as stored in the Block (ndarray or ExtensionArray). 
+ """ + for i in range(len(self.columns)): + yield self._get_column_array(i) + def __getitem__(self, key): key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) @@ -8031,8 +8056,12 @@ def _reduce( assert filter_type is None or filter_type == "bool", filter_type - dtype_is_dt = self.dtypes.apply( - lambda x: is_datetime64_any_dtype(x) or is_period_dtype(x) + dtype_is_dt = np.array( + [ + is_datetime64_any_dtype(values.dtype) or is_period_dtype(values.dtype) + for values in self._iter_column_arrays() + ], + dtype=bool, ) if numeric_only is None and name in ["mean", "median"] and dtype_is_dt.any(): warnings.warn( diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index bfb16b48d832c..cc41bfc45a9cf 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -984,6 +984,14 @@ def iget(self, i: int) -> "SingleBlockManager": self.axes[1], ) + def iget_values(self, i: int) -> ArrayLike: + """ + Return the data for column i as the values (ndarray or ExtensionArray). + """ + block = self.blocks[self.blknos[i]] + values = block.iget(self.blklocs[i]) + return values + def idelete(self, indexer): """ Delete selected locations in-place (new block and array, same BlockManager)
Broken off from https://github.com/pandas-dev/pandas/pull/32867, also mentioned this in https://github.com/pandas-dev/pandas/pull/33229 In general, when doing certain things column-wise, we often just need the arrays (eg to calculate a reduction, to check the dtype, ..), and creating Series gives unnecessary overhead in that case. In this PR, I therefore added two helper functions for making it easier to do this (`_ixs_values` as a variant on `_ixs` but returning the array instead of Series, and `_iter_arrays` that calls `_ixs_values` for each column iteratively as an additional helper function). I also used it in one place as illustration (in `_reduce`, what I was working on in https://github.com/pandas-dev/pandas/pull/32867, but it can of course be used more generally). In that example case, it is to replace an apply: ``` In [1]: df = pd.DataFrame(np.random.randint(1000, size=(10000,10))).astype("Int64").copy() In [2]: from pandas.core.dtypes.common import is_datetime64_any_dtype, is_period_dtype In [3]: %timeit df.dtypes.apply(lambda x: is_datetime64_any_dtype(x) or is_period_dtype(x)) 256 µs ± 2.66 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) In [5]: %timeit np.array([is_datetime64_any_dtype(values) or is_period_dtype(values) for values in df._iter_arrays()], dtype=bool) 56.2 µs ± 282 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each) ``` (and this is only for 10 columns) @jbrockmendel
https://api.github.com/repos/pandas-dev/pandas/pulls/33252
2020-04-03T07:53:42Z
2020-04-10T17:37:22Z
2020-04-10T17:37:22Z
2020-04-10T17:57:22Z
Display Timedelta nanoseconds
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 7fe8f78151336..0969360d66fcc 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -310,6 +310,7 @@ Timedelta - Bug in constructing a :class:`Timedelta` with a high precision integer that would round the :class:`Timedelta` components (:issue:`31354`) - Bug in dividing ``np.nan`` or ``None`` by :class:`Timedelta`` incorrectly returning ``NaT`` (:issue:`31869`) - Timedeltas now understand ``µs`` as identifier for microsecond (:issue:`32899`) +- :class:`Timedelta` string representation now includes nanoseconds, when nanoseconds are non-zero (:issue:`9309`) Timezones ^^^^^^^^^ diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index c8bf317cbf041..f2b77f3517a25 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1072,9 +1072,11 @@ cdef class _Timedelta(timedelta): subs = (self._h or self._m or self._s or self._ms or self._us or self._ns) - # by default not showing nano if self._ms or self._us or self._ns: seconds_fmt = "{seconds:02}.{milliseconds:03}{microseconds:03}" + if self._ns: + # GH#9309 + seconds_fmt += "{nanoseconds:03}" else: seconds_fmt = "{seconds:02}" diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index d7529ec799022..960a82caafeeb 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -58,12 +58,12 @@ def to_timedelta(arg, unit="ns", errors="raise"): >>> pd.to_timedelta('1 days 06:05:01.00003') Timedelta('1 days 06:05:01.000030') >>> pd.to_timedelta('15.5us') - Timedelta('0 days 00:00:00.000015') + Timedelta('0 days 00:00:00.000015500') Parsing a list or array of strings: >>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan']) - TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015', NaT], + TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015500', NaT], 
dtype='timedelta64[ns]', freq=None) Converting numbers by specifying the `unit` keyword argument: diff --git a/pandas/tests/frame/methods/test_describe.py b/pandas/tests/frame/methods/test_describe.py index 8a75e80a12f52..b61d0d28e2fba 100644 --- a/pandas/tests/frame/methods/test_describe.py +++ b/pandas/tests/frame/methods/test_describe.py @@ -230,15 +230,15 @@ def test_describe_timedelta_values(self): tm.assert_frame_equal(result, expected) exp_repr = ( - " t1 t2\n" - "count 5 5\n" - "mean 3 days 00:00:00 0 days 03:00:00\n" - "std 1 days 13:56:50.394919 0 days 01:34:52.099788\n" - "min 1 days 00:00:00 0 days 01:00:00\n" - "25% 2 days 00:00:00 0 days 02:00:00\n" - "50% 3 days 00:00:00 0 days 03:00:00\n" - "75% 4 days 00:00:00 0 days 04:00:00\n" - "max 5 days 00:00:00 0 days 05:00:00" + " t1 t2\n" + "count 5 5\n" + "mean 3 days 00:00:00 0 days 03:00:00\n" + "std 1 days 13:56:50.394919273 0 days 01:34:52.099788303\n" + "min 1 days 00:00:00 0 days 01:00:00\n" + "25% 2 days 00:00:00 0 days 02:00:00\n" + "50% 3 days 00:00:00 0 days 03:00:00\n" + "75% 4 days 00:00:00 0 days 04:00:00\n" + "max 5 days 00:00:00 0 days 05:00:00" ) assert repr(result) == exp_repr diff --git a/pandas/tests/scalar/timedelta/test_constructors.py b/pandas/tests/scalar/timedelta/test_constructors.py index ec3c6e9e3a326..c58994d738562 100644 --- a/pandas/tests/scalar/timedelta/test_constructors.py +++ b/pandas/tests/scalar/timedelta/test_constructors.py @@ -176,10 +176,9 @@ def test_td_from_repr_roundtrip(val): td = Timedelta(val) assert Timedelta(td.value) == td - # str does not normally display nanos - if not td.nanoseconds: - assert Timedelta(str(td)) == td + assert Timedelta(str(td)) == td assert Timedelta(td._repr_base(format="all")) == td + assert Timedelta(td._repr_base()) == td def test_overflow_on_construction():
- [x] closes #9309 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This allows us to make TimedeltaBlock.to_native_types dispatch to TimedeltaArray._format_native_types. ATM TDBlock.to_native_types has a comment ``` # FIXME: # should use the formats.format.Timedelta64Formatter here # to figure what format to pass to the Timedelta # e.g. to not show the decimals say ```
https://api.github.com/repos/pandas-dev/pandas/pulls/33250
2020-04-03T02:07:48Z
2020-04-03T18:53:38Z
2020-04-03T18:53:37Z
2020-04-03T19:13:12Z
REF: dispatch DatetimeBlock.to_native_types to DTA._format_native_types
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 741290a4908a5..be2ac8c22bc8a 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -613,8 +613,8 @@ def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): fmt = _get_format_datetime64_from_values(self, date_format) return tslib.format_array_from_datetime( - self.asi8, tz=self.tz, format=fmt, na_rep=na_rep - ) + self.asi8.ravel(), tz=self.tz, format=fmt, na_rep=na_rep + ).reshape(self.shape) # ----------------------------------------------------------------- # Comparison Methods diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 2cc0bb07bd17f..bd90325114ee1 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -6,7 +6,7 @@ import numpy as np -from pandas._libs import NaT, Timestamp, algos as libalgos, lib, tslib, writers +from pandas._libs import NaT, Timestamp, algos as libalgos, lib, writers import pandas._libs.internals as libinternals from pandas._libs.tslibs import Timedelta, conversion from pandas._libs.tslibs.timezones import tz_compare @@ -2116,21 +2116,13 @@ def _can_hold_element(self, element: Any) -> bool: return is_valid_nat_for_dtype(element, self.dtype) - def to_native_types(self, na_rep=None, date_format=None, quoting=None, **kwargs): - """ convert to our native types format, slicing if desired """ - values = self.values - i8values = self.values.view("i8") - - from pandas.io.formats.format import _get_format_datetime64_from_values - - fmt = _get_format_datetime64_from_values(values, date_format) + def to_native_types(self, na_rep="NaT", date_format=None, **kwargs): + """ convert to our native types format """ + dta = self.array_values() - result = tslib.format_array_from_datetime( - i8values.ravel(), - tz=getattr(self.values, "tz", None), - format=fmt, - na_rep=na_rep, - ).reshape(i8values.shape) + result = dta._format_native_types( + na_rep=na_rep, 
date_format=date_format, **kwargs + ) return np.atleast_2d(result) def set(self, locs, values): diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 17cc897136aad..a9e668312d751 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1547,7 +1547,7 @@ def _is_dates_only( values: Union[np.ndarray, DatetimeArray, Index, DatetimeIndex] ) -> bool: # return a boolean if we are only dates (and don't have a timezone) - assert values.ndim == 1 + values = values.ravel() values = DatetimeIndex(values) if values.tz is not None:
xref #33248
https://api.github.com/repos/pandas-dev/pandas/pulls/33249
2020-04-03T00:44:48Z
2020-04-03T19:33:31Z
2020-04-03T19:33:31Z
2020-04-03T19:42:46Z
REF: do slicing before calling Block.to_native_types
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 8e2592a603716..44c63aaf6a758 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -651,12 +651,10 @@ def should_store(self, value: ArrayLike) -> bool: """ return is_dtype_equal(value.dtype, self.dtype) - def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs): - """ convert to our native types format, slicing if desired """ + def to_native_types(self, na_rep="nan", quoting=None, **kwargs): + """ convert to our native types format """ values = self.values - if slicer is not None: - values = values[:, slicer] mask = isna(values) itemsize = writers.word_len(na_rep) @@ -1715,11 +1713,9 @@ def get_values(self, dtype=None): def array_values(self) -> ExtensionArray: return self.values - def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs): + def to_native_types(self, na_rep="nan", quoting=None, **kwargs): """override to use ExtensionArray astype for the conversion""" values = self.values - if slicer is not None: - values = values[slicer] mask = isna(values) values = np.asarray(values.astype(object)) @@ -1937,18 +1933,10 @@ def _can_hold_element(self, element: Any) -> bool: ) def to_native_types( - self, - slicer=None, - na_rep="", - float_format=None, - decimal=".", - quoting=None, - **kwargs, + self, na_rep="", float_format=None, decimal=".", quoting=None, **kwargs, ): - """ convert to our native types format, slicing if desired """ + """ convert to our native types format """ values = self.values - if slicer is not None: - values = values[:, slicer] # see gh-13418: no special formatting is desired at the # output (important for appropriate 'quoting' behaviour), @@ -2131,17 +2119,11 @@ def _can_hold_element(self, element: Any) -> bool: return is_valid_nat_for_dtype(element, self.dtype) - def to_native_types( - self, slicer=None, na_rep=None, date_format=None, quoting=None, **kwargs - ): + def 
to_native_types(self, na_rep=None, date_format=None, quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ values = self.values i8values = self.values.view("i8") - if slicer is not None: - values = values[..., slicer] - i8values = i8values[..., slicer] - from pandas.io.formats.format import _get_format_datetime64_from_values fmt = _get_format_datetime64_from_values(values, date_format) @@ -2387,11 +2369,9 @@ def fillna(self, value, **kwargs): ) return super().fillna(value, **kwargs) - def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs): - """ convert to our native types format, slicing if desired """ + def to_native_types(self, na_rep=None, quoting=None, **kwargs): + """ convert to our native types format """ values = self.values - if slicer is not None: - values = values[:, slicer] mask = isna(values) rvalues = np.empty(values.shape, dtype=object) diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 0d581f30e50e7..60691d1fea412 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -132,7 +132,7 @@ def __init__( # preallocate data 2d list self.blocks = self.obj._data.blocks - ncols = sum(b.shape[0] for b in self.blocks) + ncols = self.obj.shape[-1] self.data = [None] * ncols if chunksize is None: @@ -327,10 +327,13 @@ def _save_chunk(self, start_i: int, end_i: int) -> None: # create the data for a chunk slicer = slice(start_i, end_i) - for i in range(len(self.blocks)): - b = self.blocks[i] + + df = self.obj.iloc[slicer] + blocks = df._data.blocks + + for i in range(len(blocks)): + b = blocks[i] d = b.to_native_types( - slicer=slicer, na_rep=self.na_rep, float_format=self.float_format, decimal=self.decimal,
Related upcoming steps: - Dispatch DatetimeBlock.to_native_types to DatetimeArray._format_native_types (identical behavior - Dispatch TimeDeltaBlock.to_native_types to TimedeltaArray._format_native_types (behavior changing, but there is a FIXME in the Block version) - Dispatch FloatBlock.to_native_types to Float64Index._format_native_types - Possibly rename to_native_types to something more informative? (i think there's an issue about deprecating/renaming the Index method) - in io.csvs use `df._data.apply` instead of iterating over blocks. Still touches internals, but its a lighter touch.
https://api.github.com/repos/pandas-dev/pandas/pulls/33248
2020-04-02T22:31:00Z
2020-04-03T17:34:13Z
2020-04-03T17:34:13Z
2020-04-03T17:38:24Z
BUG: Fix droped result column in groupby with as_index False
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 814dbe999d5c1..0e61d1acfb022 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -640,6 +640,43 @@ The method :meth:`core.DataFrameGroupBy.size` would previously ignore ``as_index df.groupby("a", as_index=False).size() +.. _whatsnew_110.api_breaking.groupby_results_lost_as_index_false: + +:meth:`DataFrameGroupby.agg` lost results with ``as_index`` ``False`` when relabeling columns +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Previously :meth:`DataFrameGroupby.agg` lost the result columns, when the ``as_index`` option was +set to ``False`` and the result columns were relabeled. In this case he result values were replaced with +the previous index (:issue:`32240`). + +.. ipython:: python + + df = pd.DataFrame({"key": ["x", "y", "z", "x", "y", "z"], + "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75]}) + df + +*Previous behavior*: + +.. code-block:: ipython + + In [2]: grouped = df.groupby("key", as_index=False) + In [3]: result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min")) + In [4]: result + Out[4]: + min_val + 0 x + 1 y + 2 z + +*New behavior*: + +.. ipython:: python + + grouped = df.groupby("key", as_index=False) + result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min")) + result + + .. 
_whatsnew_110.notable_bug_fixes.apply_applymap_first_once: apply and applymap on ``DataFrame`` evaluates first row/column only once diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index b9a8f3d5a5176..1d14361757e4a 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -975,16 +975,16 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) [self._selected_obj.columns.name] * result.columns.nlevels ).droplevel(-1) - if not self.as_index: - self._insert_inaxis_grouper_inplace(result) - result.index = np.arange(len(result)) - if relabeling: # used reordered index of columns result = result.iloc[:, order] result.columns = columns + if not self.as_index: + self._insert_inaxis_grouper_inplace(result) + result.index = np.arange(len(result)) + return result._convert(datetime=True) agg = aggregate diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index bf465635c0085..40a20c8210052 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -795,6 +795,41 @@ def test_groupby_aggregate_empty_key_empty_return(): tm.assert_frame_equal(result, expected) +def test_grouby_agg_loses_results_with_as_index_false_relabel(): + # GH 32240: When the aggregate function relabels column names and + # as_index=False is specified, the results are dropped. 
+ + df = pd.DataFrame( + {"key": ["x", "y", "z", "x", "y", "z"], "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75]} + ) + + grouped = df.groupby("key", as_index=False) + result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min")) + expected = pd.DataFrame({"key": ["x", "y", "z"], "min_val": [1.0, 0.8, 0.75]}) + tm.assert_frame_equal(result, expected) + + +def test_grouby_agg_loses_results_with_as_index_false_relabel_multiindex(): + # GH 32240: When the aggregate function relabels column names and + # as_index=False is specified, the results are dropped. Check if + # multiindex is returned in the right order + + df = pd.DataFrame( + { + "key": ["x", "y", "x", "y", "x", "x"], + "key1": ["a", "b", "c", "b", "a", "c"], + "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75], + } + ) + + grouped = df.groupby(["key", "key1"], as_index=False) + result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min")) + expected = pd.DataFrame( + {"key": ["x", "x", "y"], "key1": ["a", "c", "b"], "min_val": [1.0, 0.75, 0.8]} + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( "func", [lambda s: s.mean(), lambda s: np.mean(s), lambda s: np.nanmean(s)] )
- [x] closes #32240 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry The aggregate function *lost* the results of the aggregate operation, if the ```as_index=False``` flag was set and one of the result columns is relabeled. The index was reset at first, adding columns to the DataFrame. The relabeling operation only took the columns before resetting the index as input. So the weird error occurred. I changed the order of the index reset and the relabeling. This fixes the issue.
https://api.github.com/repos/pandas-dev/pandas/pulls/33247
2020-04-02T21:27:15Z
2020-07-15T22:19:13Z
2020-07-15T22:19:13Z
2020-09-05T19:04:49Z
BLD: recursive inclusion of DLLs in package data
diff --git a/setup.py b/setup.py index 461ef005c3df3..562273abb3e18 100755 --- a/setup.py +++ b/setup.py @@ -757,7 +757,7 @@ def setup_package(): maintainer=AUTHOR, version=versioneer.get_version(), packages=find_packages(include=["pandas", "pandas.*"]), - package_data={"": ["templates/*", "_libs/*.dll"]}, + package_data={"": ["templates/*", "_libs/**/*.dll"]}, ext_modules=maybe_cythonize(extensions, compiler_directives=directives), maintainer_email=EMAIL, description=DESCRIPTION,
- [x] step 1 towards #32857 Will also make a PR in https://github.com/MacPython/pandas-wheels to add the DLLs to the wheel packaging script
https://api.github.com/repos/pandas-dev/pandas/pulls/33246
2020-04-02T19:58:38Z
2020-04-10T21:05:21Z
2020-04-10T21:05:21Z
2020-05-26T09:37:43Z