Dataset schema (per-record fields, with min/max string lengths):

| field      | type   | min length | max length |
|------------|--------|------------|------------|
| title      | string | 1          | 185        |
| diff       | string | 0          | 32.2M      |
| body       | string | 0          | 123k       |
| url        | string | 57         | 58         |
| created_at | string | 20         | 20         |
| closed_at  | string | 20         | 20         |
| merged_at  | string | 20         | 20         |
| updated_at | string | 20         | 20         |
ASV: add benchmarks for groupby cython aggregations
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index 806cf38ad90b6..fb08c6fdeaedf 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -461,6 +461,29 @@ def time_dtype_as_field(self, dtype, method, application): self.as_field_method() +class GroupByCythonAgg: + """ + Benchmarks specifically targetting our cython aggregation algorithms + (using a big enough dataframe with simple key, so a large part of the + time is actually spent in the grouped aggregation). + """ + + param_names = ["dtype", "method"] + params = [ + ["float64"], + ["sum", "prod", "min", "max", "mean", "median", "var", "first", "last"], + ] + + def setup(self, dtype, method): + N = 1_000_000 + df = DataFrame(np.random.randn(N, 10), columns=list("abcdefghij")) + df["key"] = np.random.randint(0, 100, size=N) + self.df = df + + def time_frame_agg(self, dtype, method): + self.df.groupby("key").agg(method) + + class RankWithTies: # GH 21237 param_names = ["dtype", "tie_method"]
I noticed that we currently don't really have a groupby benchmark that specifically targets the cython aggregations. We of course have benchmarks that call those, but e.g. `GroupByMethods` is set up in such a way that it mostly benchmarks the factorization and post-processing (also useful, of course), while e.g. for sum only 7% of the time is spent in the actual `groupby_add` algorithm. This PR therefore adds an additional benchmark more targeted at the cython aggregation (where 30-50% of the time is spent in the actual aggregation function, so we will more easily catch regressions/improvements there). For now I only added "float64". I could also add "float32", but I am not sure how useful that is (since they all use fused types, it's the same implementation for both dtypes).
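For reference, a minimal standalone sketch of what the new benchmark times, mirroring the `setup` in the diff above (method hardcoded to `"sum"` here for illustration):

```python
import numpy as np
from pandas import DataFrame

# Mirror of the benchmark setup: a 1M-row frame with 10 float columns
# and a low-cardinality integer key, so most of the time goes to the
# cython aggregation rather than to factorizing the key.
N = 1_000_000
df = DataFrame(np.random.randn(N, 10), columns=list("abcdefghij"))
df["key"] = np.random.randint(0, 100, size=N)

df.groupby("key").agg("sum")  # exercises the cython groupby_add path
```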
https://api.github.com/repos/pandas-dev/pandas/pulls/39846
2021-02-16T16:48:43Z
2021-02-16T23:58:27Z
2021-02-16T23:58:27Z
2021-02-17T10:16:26Z
[ArrayManager] Implement concat with axis=1 (merge/join)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3a07149fe8171..461363d295f6a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -154,6 +154,7 @@ jobs: source activate pandas-dev pytest pandas/tests/frame/methods --array-manager pytest pandas/tests/arithmetic/ --array-manager + pytest pandas/tests/reshape/merge --array-manager # indexing subset (temporary since other tests don't pass yet) pytest pandas/tests/frame/indexing/test_indexing.py::TestDataFrameIndexing::test_setitem_boolean --array-manager diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index 132598e03d6c0..054ce8a40288b 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -12,7 +12,7 @@ TimeDeltaBlock, make_block, ) -from pandas.core.internals.concat import concatenate_block_managers +from pandas.core.internals.concat import concatenate_managers from pandas.core.internals.managers import ( BlockManager, SingleBlockManager, @@ -35,7 +35,7 @@ "ArrayManager", "BlockManager", "SingleBlockManager", - "concatenate_block_managers", + "concatenate_managers", # those two are preserved here for downstream compatibility (GH-33892) "create_block_manager_from_arrays", "create_block_manager_from_blocks", diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index e09a434170780..d38d278e89a67 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -831,7 +831,7 @@ def _reindex_indexer( new_axes = list(self._axes) new_axes[axis] = new_axis - return type(self)(new_arrays, new_axes) + return type(self)(new_arrays, new_axes, do_integrity_check=False) def take(self, indexer, axis: int = 1, verify: bool = True, convert: bool = True): """ diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 16440f7a4c2bf..362f2fde47e0b 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -49,7 +49,46 @@ from pandas import Index -def concatenate_block_managers( +def concatenate_array_managers( + mgrs_indexers, axes: List[Index], concat_axis: int, copy: bool +) -> Manager: + """ + Concatenate array managers into one. 
+ + Parameters + ---------- + mgrs_indexers : list of (ArrayManager, {axis: indexer,...}) tuples + axes : list of Index + concat_axis : int + copy : bool + + Returns + ------- + ArrayManager + """ + # reindex all arrays + mgrs = [] + for mgr, indexers in mgrs_indexers: + for ax, indexer in indexers.items(): + mgr = mgr.reindex_indexer(axes[ax], indexer, axis=ax, allow_dups=True) + mgrs.append(mgr) + + if concat_axis == 1: + # concatting along the rows -> concat the reindexed arrays + # TODO(ArrayManager) doesn't yet preserve the correct dtype + arrays = [ + concat_compat([mgrs[i].arrays[j] for i in range(len(mgrs))]) + for j in range(len(mgrs[0].arrays)) + ] + return ArrayManager(arrays, [axes[1], axes[0]], do_integrity_check=False) + else: + # concatting along the columns -> combine reindexed arrays in a single manager + assert concat_axis == 0 + arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs])) + return ArrayManager(arrays, [axes[1], axes[0]], do_integrity_check=False) + + +def concatenate_managers( mgrs_indexers, axes: List[Index], concat_axis: int, copy: bool ) -> Manager: """ @@ -66,20 +105,9 @@ def concatenate_block_managers( ------- BlockManager """ + # TODO(ArrayManager) this assumes that all managers are of the same type if isinstance(mgrs_indexers[0][0], ArrayManager): - - if concat_axis == 1: - # TODO for now only fastpath without indexers - mgrs = [t[0] for t in mgrs_indexers] - arrays = [ - concat_compat([mgrs[i].arrays[j] for i in range(len(mgrs))], axis=0) - for j in range(len(mgrs[0].arrays)) - ] - return ArrayManager(arrays, [axes[1], axes[0]]) - elif concat_axis == 0: - mgrs = [t[0] for t in mgrs_indexers] - arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs])) - return ArrayManager(arrays, [axes[1], axes[0]]) + return concatenate_array_managers(mgrs_indexers, axes, concat_axis, copy) concat_plans = [ _get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 9c536abbc7559..86df773147e21 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -282,6 +282,18 @@ def get_dtypes(self): dtypes = np.array([blk.dtype for blk in self.blocks]) return algos.take_nd(dtypes, self.blknos, allow_fill=False) + @property + def arrays(self): + """ + Quick access to the backing arrays of the Blocks. + + Only for compatibility with ArrayManager for testing convenience. + Not to be used in actual code, and return value is not the same as the + ArrayManager method (list of 1D arrays vs iterator of 2D ndarrays / 1D EAs). 
+ """ + for blk in self.blocks: + yield blk.values + def __getstate__(self): block_values = [b.values for b in self.blocks] block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks] diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 92fc4a2e85163..a8c6913cd5d6c 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -43,7 +43,7 @@ get_unanimous_names, ) import pandas.core.indexes.base as ibase -from pandas.core.internals import concatenate_block_managers +from pandas.core.internals import concatenate_managers if TYPE_CHECKING: from pandas import ( @@ -524,7 +524,7 @@ def get_result(self): mgrs_indexers.append((obj._mgr, indexers)) - new_data = concatenate_block_managers( + new_data = concatenate_managers( mgrs_indexers, self.new_axes, concat_axis=self.bm_axis, copy=self.copy ) if not self.copy: diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 79d018427aa33..217d9915f834c 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -76,7 +76,7 @@ import pandas.core.common as com from pandas.core.construction import extract_array from pandas.core.frame import _merge_doc -from pandas.core.internals import concatenate_block_managers +from pandas.core.internals import concatenate_managers from pandas.core.sorting import is_int64_overflow_possible if TYPE_CHECKING: @@ -720,7 +720,7 @@ def get_result(self): lindexers = {1: left_indexer} if left_indexer is not None else {} rindexers = {1: right_indexer} if right_indexer is not None else {} - result_data = concatenate_block_managers( + result_data = concatenate_managers( [(self.left._mgr, lindexers), (self.right._mgr, rindexers)], axes=[llabels.append(rlabels), join_index], concat_axis=0, @@ -1616,7 +1616,7 @@ def get_result(self): lindexers = {1: left_join_indexer} if left_join_indexer is not None else {} rindexers = {1: right_join_indexer} if right_join_indexer is not None else {} - result_data = concatenate_block_managers( + result_data = concatenate_managers( [(self.left._mgr, lindexers), (self.right._mgr, rindexers)], axes=[llabels.append(rlabels), join_index], concat_axis=0, diff --git a/pandas/tests/frame/methods/test_drop.py b/pandas/tests/frame/methods/test_drop.py index dc9a1565aad1e..0f51c4aef79db 100644 --- a/pandas/tests/frame/methods/test_drop.py +++ b/pandas/tests/frame/methods/test_drop.py @@ -161,7 +161,7 @@ def test_drop(self): assert return_value is None tm.assert_frame_equal(df, expected) - @td.skip_array_manager_not_yet_implemented + @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) groupby def test_drop_multiindex_not_lexsorted(self): # GH#11640 diff --git a/pandas/tests/frame/methods/test_explode.py b/pandas/tests/frame/methods/test_explode.py index be80dd49ff1fb..bd0901387eeed 100644 --- a/pandas/tests/frame/methods/test_explode.py +++ b/pandas/tests/frame/methods/test_explode.py @@ -1,14 +1,9 @@ import numpy as np import pytest -import pandas.util._test_decorators as td - import pandas as pd import pandas._testing as tm -# TODO(ArrayManager) concat with reindexing -pytestmark = td.skip_array_manager_not_yet_implemented - def test_error(): df = pd.DataFrame( diff --git a/pandas/tests/frame/methods/test_join.py b/pandas/tests/frame/methods/test_join.py index 90456ad949f59..1c7f7e3ff674a 100644 --- a/pandas/tests/frame/methods/test_join.py +++ b/pandas/tests/frame/methods/test_join.py @@ -3,8 +3,6 @@ import numpy as np import pytest -import pandas.util._test_decorators as td - import pandas as pd 
from pandas import ( DataFrame, @@ -15,9 +13,6 @@ ) import pandas._testing as tm -# TODO(ArrayManager) concat with reindexing -pytestmark = td.skip_array_manager_not_yet_implemented - @pytest.fixture def frame_with_period_index(): @@ -240,8 +235,9 @@ def test_join(self, multiindex_dataframe_random_data): b = frame.loc[frame.index[2:], ["B", "C"]] joined = a.join(b, how="outer").reindex(frame.index) - expected = frame.copy() - expected.values[np.isnan(joined.values)] = np.nan + expected = frame.copy().values + expected[np.isnan(joined.values)] = np.nan + expected = DataFrame(expected, index=frame.index, columns=frame.columns) assert not np.isnan(joined.values).all() diff --git a/pandas/tests/io/formats/test_printing.py b/pandas/tests/io/formats/test_printing.py index 2339e21288bb5..24d1973eeda6d 100644 --- a/pandas/tests/io/formats/test_printing.py +++ b/pandas/tests/io/formats/test_printing.py @@ -121,7 +121,7 @@ def test_ambiguous_width(self): assert adjoined == expected -@td.skip_array_manager_not_yet_implemented +@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) JSON class TestTableSchemaRepr: @classmethod def setup_class(cls): diff --git a/pandas/tests/io/test_fsspec.py b/pandas/tests/io/test_fsspec.py index d9575a6ad81e5..3131131682ccd 100644 --- a/pandas/tests/io/test_fsspec.py +++ b/pandas/tests/io/test_fsspec.py @@ -247,7 +247,7 @@ def test_pickle_options(fsspectest): tm.assert_frame_equal(df, out) -@td.skip_array_manager_not_yet_implemented +@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) JSON def test_json_options(fsspectest): df = DataFrame({"a": [0]}) df.to_json("testmem://afile", storage_options={"test": "json_write"}) diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index e5499c44be7d7..2ec94d4cebf5a 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -1,6 +1,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + import pandas as pd from pandas import ( Categorical, @@ -551,6 +553,7 @@ def test_join_non_unique_period_index(self): ) tm.assert_frame_equal(result, expected) + @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) groupby def test_mixed_type_join_with_suffix(self): # GH #916 df = DataFrame(np.random.randn(20, 6), columns=["a", "b", "c", "d", "e", "f"]) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index d9af59382ae79..e1b1e80a29a43 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -287,17 +287,27 @@ def test_merge_copy(self): merged["d"] = "peekaboo" assert (right["d"] == "bar").all() - def test_merge_nocopy(self): + def test_merge_nocopy(self, using_array_manager): left = DataFrame({"a": 0, "b": 1}, index=range(10)) right = DataFrame({"c": "foo", "d": "bar"}, index=range(10)) merged = merge(left, right, left_index=True, right_index=True, copy=False) - merged["a"] = 6 - assert (left["a"] == 6).all() + if using_array_manager: + # With ArrayManager, setting a column doesn't change the values inplace + # and thus does not propagate the changes to the original left/right + # dataframes -> need to check that no copy was made in a different way + # TODO(ArrayManager) we should be able to simplify this with a .loc + # setitem test: merged.loc[0, "a"] = 10; assert left.loc[0, "a"] == 10 + # but this currently replaces the array (_setitem_with_indexer_split_path) + assert merged._mgr.arrays[0] is 
left._mgr.arrays[0] + assert merged._mgr.arrays[2] is right._mgr.arrays[0] + else: + merged["a"] = 6 + assert (left["a"] == 6).all() - merged["d"] = "peekaboo" - assert (right["d"] == "peekaboo").all() + merged["d"] = "peekaboo" + assert (right["d"] == "peekaboo").all() def test_intelligently_handle_join_key(self): # #733, be a bit more 1337 about not returning unconsolidated DataFrame @@ -1381,7 +1391,10 @@ def test_merge_readonly(self): np.arange(20).reshape((5, 4)) + 1, columns=["a", "b", "x", "y"] ) - data1._mgr.blocks[0].values.flags.writeable = False + # make each underlying block array / column array read-only + for arr in data1._mgr.arrays: + arr.flags.writeable = False + data1.merge(data2) # no error
The (hopefully) "easy" part of https://github.com/pandas-dev/pandas/pull/39612, for which there seems to be less discussion: this separates out the pieces from https://github.com/pandas-dev/pandas/pull/39612 that are needed to get `concat` with `axis=1` working (`concat_axis=0`), which at least enables the join / merge use cases.
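A minimal standalone sketch of the two concat paths the new `concatenate_array_managers` implements, using plain NumPy stand-ins for the per-column arrays (names here are illustrative, not the pandas internals API):

```python
import itertools
import numpy as np

# Each "manager" holds one 1D array per column.
mgr1 = [np.array([1.0, 2.0]), np.array([10.0, 20.0])]  # columns a, b
mgr2 = [np.array([3.0]), np.array([30.0])]             # columns a, b
mgrs = [mgr1, mgr2]

# concat_axis == 1 in manager terms: concatenating along the rows means
# stitching together the j-th array of every manager, column by column.
row_concat = [
    np.concatenate([mgr[j] for mgr in mgrs]) for j in range(len(mgrs[0]))
]

# concat_axis == 0: concatenating along the columns just chains the
# per-column arrays of all (already reindexed) managers into one list.
col_concat = list(itertools.chain.from_iterable(mgrs))
```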
https://api.github.com/repos/pandas-dev/pandas/pulls/39841
2021-02-16T12:10:09Z
2021-02-23T10:13:44Z
2021-02-23T10:13:44Z
2021-02-23T21:21:51Z
TST: JSON with tz roundtrip
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index e25964f556e4e..94931f9635075 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -759,8 +759,7 @@ def test_comprehensive(self): "E": pd.Series(pd.Categorical(["a", "b", "c", "c"])), "F": pd.Series(pd.Categorical(["a", "b", "c", "c"], ordered=True)), "G": [1.1, 2.2, 3.3, 4.4], - # 'H': pd.date_range('2016-01-01', freq='d', periods=4, - # tz='US/Central'), + "H": pd.date_range("2016-01-01", freq="d", periods=4, tz="US/Central"), "I": [True, False, False, True], }, index=pd.Index(range(4), name="idx"),
The test was previously ignored because `tz` was not supported. That was fixed by PR #35973, so we might as well include the test now.
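A minimal sketch of the roundtrip the re-enabled column exercises (table-schema orient with a tz-aware datetime column):

```python
import pandas as pd

df = pd.DataFrame(
    {"H": pd.date_range("2016-01-01", freq="d", periods=4, tz="US/Central")},
    index=pd.Index(range(4), name="idx"),
)

# With the fix, the Table Schema roundtrip preserves the tz-aware dtype.
out = df.to_json(orient="table")
result = pd.read_json(out, orient="table")
```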
https://api.github.com/repos/pandas-dev/pandas/pulls/39840
2021-02-16T10:55:40Z
2021-02-16T13:51:36Z
2021-02-16T13:51:36Z
2021-02-16T18:03:40Z
TST/REF: collect tests by method
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index fc4809d333e57..4da9ed76844af 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -1146,7 +1146,8 @@ def test_setitem_frame_mixed(self, float_string_frame): f.loc[key] = piece tm.assert_almost_equal(f.loc[f.index[0:2], ["A", "B"]].values, piece.values) - # rows unaligned + def test_setitem_frame_mixed_rows_unaligned(self, float_string_frame): + # GH#3216 rows unaligned f = float_string_frame.copy() piece = DataFrame( [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]], @@ -1159,7 +1160,8 @@ def test_setitem_frame_mixed(self, float_string_frame): f.loc[f.index[0:2:], ["A", "B"]].values, piece.values[0:2] ) - # key is unaligned with values + def test_setitem_frame_mixed_key_unaligned(self, float_string_frame): + # GH#3216 key is unaligned with values f = float_string_frame.copy() piece = f.loc[f.index[:2], ["A"]] piece.index = f.index[-2:] @@ -1168,7 +1170,8 @@ def test_setitem_frame_mixed(self, float_string_frame): piece["B"] = np.nan tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values) - # ndarray + def test_setitem_frame_mixed_ndarray(self, float_string_frame): + # GH#3216 ndarray f = float_string_frame.copy() piece = float_string_frame.loc[f.index[:2], ["A", "B"]] key = (f.index[slice(-2, None)], ["A", "B"]) @@ -1471,7 +1474,7 @@ def test_loc_setitem_datetimeindex_tz(self, idxer, tz_naive_fixture): result.loc[:, idxer] = expected tm.assert_frame_equal(result, expected) - def test_at_time_between_time_datetimeindex(self): + def test_loc_setitem_time_key(self): index = date_range("2012-01-01", "2012-01-05", freq="30min") df = DataFrame(np.random.randn(len(index), 5), index=index) akey = time(12, 0, 0) @@ -1479,20 +1482,6 @@ def test_at_time_between_time_datetimeindex(self): ainds = [24, 72, 120, 168] binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172] - result = df.at_time(akey) - expected = df.loc[akey] - expected2 = df.iloc[ainds] - tm.assert_frame_equal(result, expected) - tm.assert_frame_equal(result, expected2) - assert len(result) == 4 - - result = df.between_time(bkey.start, bkey.stop) - expected = df.loc[bkey] - expected2 = df.iloc[binds] - tm.assert_frame_equal(result, expected) - tm.assert_frame_equal(result, expected2) - assert len(result) == 12 - result = df.copy() result.loc[akey] = 0 result = result.loc[akey] @@ -1529,26 +1518,11 @@ def test_loc_getitem_index_namedtuple(self): result = df.loc[IndexType("foo", "bar")]["A"] assert result == 1 - @pytest.mark.parametrize( - "tpl", - [ - (1,), - ( - 1, - 2, - ), - ], - ) + @pytest.mark.parametrize("tpl", [(1,), (1, 2)]) def test_loc_getitem_index_single_double_tuples(self, tpl): # GH 20991 idx = Index( - [ - (1,), - ( - 1, - 2, - ), - ], + [(1,), (1, 2)], name="A", tupleize_cols=False, ) diff --git a/pandas/tests/frame/methods/test_at_time.py b/pandas/tests/frame/methods/test_at_time.py index e9ac4336701a3..2d05176d20f5f 100644 --- a/pandas/tests/frame/methods/test_at_time.py +++ b/pandas/tests/frame/methods/test_at_time.py @@ -113,3 +113,16 @@ def test_at_time_axis(self, axis): result.index = result.index._with_freq(None) expected.index = expected.index._with_freq(None) tm.assert_frame_equal(result, expected) + + def test_at_time_datetimeindex(self): + index = date_range("2012-01-01", "2012-01-05", freq="30min") + df = DataFrame(np.random.randn(len(index), 5), index=index) + akey = time(12, 0, 0) + ainds = [24, 72, 120, 168] + + 
result = df.at_time(akey) + expected = df.loc[akey] + expected2 = df.iloc[ainds] + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result, expected2) + assert len(result) == 4 diff --git a/pandas/tests/frame/methods/test_between_time.py b/pandas/tests/frame/methods/test_between_time.py index 073368019e0f5..0daa267767269 100644 --- a/pandas/tests/frame/methods/test_between_time.py +++ b/pandas/tests/frame/methods/test_between_time.py @@ -194,3 +194,16 @@ def test_between_time_axis_raises(self, axis): ts.columns = mask with pytest.raises(TypeError, match=msg): ts.between_time(stime, etime, axis=1) + + def test_between_time_datetimeindex(self): + index = date_range("2012-01-01", "2012-01-05", freq="30min") + df = DataFrame(np.random.randn(len(index), 5), index=index) + bkey = slice(time(13, 0, 0), time(14, 0, 0)) + binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172] + + result = df.between_time(bkey.start, bkey.stop) + expected = df.loc[bkey] + expected2 = df.iloc[binds] + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result, expected2) + assert len(result) == 12 diff --git a/pandas/tests/frame/methods/test_describe.py b/pandas/tests/frame/methods/test_describe.py index 4bc45e3abca32..113e870c8879b 100644 --- a/pandas/tests/frame/methods/test_describe.py +++ b/pandas/tests/frame/methods/test_describe.py @@ -391,3 +391,14 @@ def test_describe_when_include_all_exclude_not_allowed(self, exclude): msg = "exclude must be None when include is 'all'" with pytest.raises(ValueError, match=msg): df.describe(include="all", exclude=exclude) + + def test_describe_with_duplicate_columns(self): + df = DataFrame( + [[1, 1, 1], [2, 2, 2], [3, 3, 3]], + columns=["bar", "a", "a"], + dtype="float64", + ) + result = df.describe() + ser = df.iloc[:, 0].describe() + expected = pd.concat([ser, ser, ser], keys=df.columns, axis=1) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_drop.py b/pandas/tests/frame/methods/test_drop.py index 6749865367399..f92899740f95f 100644 --- a/pandas/tests/frame/methods/test_drop.py +++ b/pandas/tests/frame/methods/test_drop.py @@ -457,3 +457,13 @@ def test_drop_with_non_unique_multiindex(self): result = df.drop(index="x") expected = DataFrame([2], index=MultiIndex.from_arrays([["y"], ["j"]])) tm.assert_frame_equal(result, expected) + + def test_drop_with_duplicate_columns(self): + df = DataFrame( + [[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "a", "a"] + ) + result = df.drop(["a"], axis=1) + expected = DataFrame([[1], [1], [1]], columns=["bar"]) + tm.assert_frame_equal(result, expected) + result = df.drop("a", axis=1) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_reindex.py b/pandas/tests/frame/methods/test_reindex.py index 128942cd64926..8a3ac265db154 100644 --- a/pandas/tests/frame/methods/test_reindex.py +++ b/pandas/tests/frame/methods/test_reindex.py @@ -644,6 +644,18 @@ def test_reindex_dups(self): with pytest.raises(ValueError, match=msg): df.reindex(index=list(range(len(df)))) + def test_reindex_with_duplicate_columns(self): + + # reindex is invalid! 
+ df = DataFrame( + [[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "a", "a"] + ) + msg = "cannot reindex from a duplicate axis" + with pytest.raises(ValueError, match=msg): + df.reindex(columns=["bar"]) + with pytest.raises(ValueError, match=msg): + df.reindex(columns=["bar", "foo"]) + def test_reindex_axis_style(self): # https://github.com/pandas-dev/pandas/issues/12392 df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) diff --git a/pandas/tests/frame/methods/test_rename.py b/pandas/tests/frame/methods/test_rename.py index 10ed862225c01..677d862dfe077 100644 --- a/pandas/tests/frame/methods/test_rename.py +++ b/pandas/tests/frame/methods/test_rename.py @@ -4,11 +4,14 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + from pandas import ( DataFrame, Index, MultiIndex, Series, + merge, ) import pandas._testing as tm @@ -357,3 +360,45 @@ def test_rename_mapper_and_positional_arguments_raises(self): with pytest.raises(TypeError, match=msg): df.rename({}, columns={}, index={}) + + @td.skip_array_manager_not_yet_implemented + def test_rename_with_duplicate_columns(self): + # GH#4403 + df4 = DataFrame( + {"RT": [0.0454], "TClose": [22.02], "TExg": [0.0422]}, + index=MultiIndex.from_tuples( + [(600809, 20130331)], names=["STK_ID", "RPT_Date"] + ), + ) + + df5 = DataFrame( + { + "RPT_Date": [20120930, 20121231, 20130331], + "STK_ID": [600809] * 3, + "STK_Name": ["饡驦", "饡驦", "饡驦"], + "TClose": [38.05, 41.66, 30.01], + }, + index=MultiIndex.from_tuples( + [(600809, 20120930), (600809, 20121231), (600809, 20130331)], + names=["STK_ID", "RPT_Date"], + ), + ) + # TODO: can we construct this without merge? + k = merge(df4, df5, how="inner", left_index=True, right_index=True) + result = k.rename(columns={"TClose_x": "TClose", "TClose_y": "QT_Close"}) + str(result) + result.dtypes + + expected = DataFrame( + [[0.0454, 22.02, 0.0422, 20130331, 600809, "饡驦", 30.01]], + columns=[ + "RT", + "TClose", + "TExg", + "RPT_Date", + "STK_ID", + "STK_Name", + "QT_Close", + ], + ).set_index(["STK_ID", "RPT_Date"], drop=False) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_values.py b/pandas/tests/frame/methods/test_values.py index 94ea369d26b97..38e58860959b8 100644 --- a/pandas/tests/frame/methods/test_values.py +++ b/pandas/tests/frame/methods/test_values.py @@ -55,6 +55,12 @@ def test_values_duplicates(self): tm.assert_numpy_array_equal(result, expected) + def test_values_with_duplicate_columns(self): + df = DataFrame([[1, 2.5], [3, 4.5]], index=[1, 2], columns=["x", "x"]) + result = df.values + expected = np.array([[1, 2.5], [3, 4.5]]) + assert (result == expected).all().all() + @pytest.mark.parametrize("constructor", [date_range, period_range]) def test_values_casts_datetimelike_to_object(self, constructor): series = Series(constructor("2000-01-01", periods=10, freq="D")) diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 26710b1f9ed73..44b6d44ee6275 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -57,6 +57,21 @@ def any(self, axis=None): class TestFrameComparisons: # Specifically _not_ flex-comparisons + def test_comparison_with_categorical_dtype(self): + # GH#12564 + + df = DataFrame({"A": ["foo", "bar", "baz"]}) + exp = DataFrame({"A": [True, False, False]}) + + res = df == "foo" + tm.assert_frame_equal(res, exp) + + # casting to categorical shouldn't affect the result + df["A"] = df["A"].astype("category") + + res = df == "foo" + 
tm.assert_frame_equal(res, exp) + def test_frame_in_list(self): # GH#12689 this should raise at the DataFrame level, not blocks df = DataFrame(np.random.randn(6, 4), columns=list("ABCD")) @@ -597,6 +612,26 @@ def test_flex_add_scalar_fill_value(self): res = df.add(2, fill_value=0) tm.assert_frame_equal(res, exp) + def test_sub_alignment_with_duplicate_index(self): + # GH#5185 dup aligning operations should work + df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3]) + df2 = DataFrame([1, 2, 3], index=[1, 2, 3]) + expected = DataFrame([0, 2, 0, 2, 2], index=[1, 1, 2, 2, 3]) + result = df1.sub(df2) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("op", ["__add__", "__mul__", "__sub__", "__truediv__"]) + def test_arithmetic_with_duplicate_columns(self, op): + # operations + df = DataFrame({"A": np.arange(10), "B": np.random.rand(10)}) + expected = getattr(df, op)(df) + expected.columns = ["A", "A"] + df.columns = ["A", "A"] + result = getattr(df, op)(df) + tm.assert_frame_equal(result, expected) + str(result) + result.dtypes + class TestFrameArithmetic: def test_td64_op_nat_casting(self): diff --git a/pandas/tests/frame/test_nonunique_indexes.py b/pandas/tests/frame/test_nonunique_indexes.py index 3e48883232243..c3812e109b938 100644 --- a/pandas/tests/frame/test_nonunique_indexes.py +++ b/pandas/tests/frame/test_nonunique_indexes.py @@ -4,7 +4,6 @@ import pandas as pd from pandas import ( DataFrame, - MultiIndex, Series, date_range, ) @@ -19,7 +18,7 @@ def check(result, expected=None): class TestDataFrameNonuniqueIndexes: - def test_column_dups_operations(self): + def test_setattr_columns_vs_construct_with_columns(self): # assignment # GH 3687 @@ -30,6 +29,7 @@ def test_column_dups_operations(self): expected = DataFrame(arr, columns=idx) check(df, expected) + def test_setattr_columns_vs_construct_with_columns_datetimeindx(self): idx = date_range("20130101", periods=4, freq="Q-NOV") df = DataFrame( [[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=["a", "a", "a", "a"] @@ -162,90 +162,6 @@ def test_dup_across_dtypes(self): ) check(df, expected) - def test_values_with_duplicate_columns(self): - # values - df = DataFrame([[1, 2.5], [3, 4.5]], index=[1, 2], columns=["x", "x"]) - result = df.values - expected = np.array([[1, 2.5], [3, 4.5]]) - assert (result == expected).all().all() - - def test_rename_with_duplicate_columns(self): - # rename, GH 4403 - df4 = DataFrame( - {"RT": [0.0454], "TClose": [22.02], "TExg": [0.0422]}, - index=MultiIndex.from_tuples( - [(600809, 20130331)], names=["STK_ID", "RPT_Date"] - ), - ) - - df5 = DataFrame( - { - "RPT_Date": [20120930, 20121231, 20130331], - "STK_ID": [600809] * 3, - "STK_Name": ["饡驦", "饡驦", "饡驦"], - "TClose": [38.05, 41.66, 30.01], - }, - index=MultiIndex.from_tuples( - [(600809, 20120930), (600809, 20121231), (600809, 20130331)], - names=["STK_ID", "RPT_Date"], - ), - ) - - k = pd.merge(df4, df5, how="inner", left_index=True, right_index=True) - result = k.rename(columns={"TClose_x": "TClose", "TClose_y": "QT_Close"}) - str(result) - result.dtypes - - expected = DataFrame( - [[0.0454, 22.02, 0.0422, 20130331, 600809, "饡驦", 30.01]], - columns=[ - "RT", - "TClose", - "TExg", - "RPT_Date", - "STK_ID", - "STK_Name", - "QT_Close", - ], - ).set_index(["STK_ID", "RPT_Date"], drop=False) - tm.assert_frame_equal(result, expected) - - def test_reindex_with_duplicate_columns(self): - - # reindex is invalid! 
- df = DataFrame( - [[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "a", "a"] - ) - msg = "cannot reindex from a duplicate axis" - with pytest.raises(ValueError, match=msg): - df.reindex(columns=["bar"]) - with pytest.raises(ValueError, match=msg): - df.reindex(columns=["bar", "foo"]) - - def test_drop_with_duplicate_columns(self): - - # drop - df = DataFrame( - [[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "a", "a"] - ) - result = df.drop(["a"], axis=1) - expected = DataFrame([[1], [1], [1]], columns=["bar"]) - check(result, expected) - result = df.drop("a", axis=1) - check(result, expected) - - def test_describe_with_duplicate_columns(self): - # describe - df = DataFrame( - [[1, 1, 1], [2, 2, 2], [3, 3, 3]], - columns=["bar", "a", "a"], - dtype="float64", - ) - result = df.describe() - s = df.iloc[:, 0].describe() - expected = pd.concat([s, s, s], keys=df.columns, axis=1) - check(result, expected) - def test_column_dups_indexes(self): # check column dups with index equal and not equal to df's index df = DataFrame( @@ -263,17 +179,6 @@ def test_column_dups_indexes(self): this_df["A"] = index check(this_df, expected_df) - def test_arithmetic_with_dups(self): - - # operations - for op in ["__add__", "__mul__", "__sub__", "__truediv__"]: - df = DataFrame({"A": np.arange(10), "B": np.random.rand(10)}) - expected = getattr(df, op)(df) - expected.columns = ["A", "A"] - df.columns = ["A", "A"] - result = getattr(df, op)(df) - check(result, expected) - def test_changing_dtypes_with_duplicate_columns(self): # multiple assignments that change dtypes # the location indexer is a slice @@ -329,16 +234,6 @@ def test_column_dups_dropna(self): result = df.dropna(subset=["A", "C"], how="all") tm.assert_frame_equal(result, expected) - def test_column_dups_indexing(self): - - # dup aligning operations should work - # GH 5185 - df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3]) - df2 = DataFrame([1, 2, 3], index=[1, 2, 3]) - expected = DataFrame([0, 2, 0, 2, 2], index=[1, 1, 2, 2, 3]) - result = df1.sub(df2) - tm.assert_frame_equal(result, expected) - def test_dup_columns_comparisons(self): # equality df1 = DataFrame([[1, 2], [2, np.nan], [3, 4], [4, 4]], columns=["A", "B"]) diff --git a/pandas/tests/indexing/interval/test_interval.py b/pandas/tests/indexing/interval/test_interval.py index 627306143788e..eaf597e6bf978 100644 --- a/pandas/tests/indexing/interval/test_interval.py +++ b/pandas/tests/indexing/interval/test_interval.py @@ -35,7 +35,7 @@ def test_getitem_with_scalar(self, series_with_interval_index, indexer_sl): tm.assert_series_equal(expected, indexer_sl(ser)[ser >= 2]) @pytest.mark.parametrize("direction", ["increasing", "decreasing"]) - def test_nonoverlapping_monotonic(self, direction, closed, indexer_sl): + def test_getitem_nonoverlapping_monotonic(self, direction, closed, indexer_sl): tpls = [(0, 1), (2, 3), (4, 5)] if direction == "decreasing": tpls = tpls[::-1] @@ -60,7 +60,7 @@ def test_nonoverlapping_monotonic(self, direction, closed, indexer_sl): for key, expected in zip(idx.mid, ser): assert indexer_sl(ser)[key] == expected - def test_non_matching(self, series_with_interval_index, indexer_sl): + def test_getitem_non_matching(self, series_with_interval_index, indexer_sl): ser = series_with_interval_index.copy() # this is a departure from our current @@ -72,7 +72,7 @@ def test_non_matching(self, series_with_interval_index, indexer_sl): indexer_sl(ser)[[-1, 3]] @pytest.mark.arm_slow - def test_large_series(self): + def test_loc_getitem_large_series(self): ser = 
Series( np.arange(1000000), index=IntervalIndex.from_breaks(np.arange(1000001)) ) diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py index 3b6bc42544c51..68ae1a0dd6f3d 100644 --- a/pandas/tests/indexing/test_categorical.py +++ b/pandas/tests/indexing/test_categorical.py @@ -450,30 +450,16 @@ def test_loc_slice(self): def test_loc_and_at_with_categorical_index(self): # GH 20629 - s = Series([1, 2, 3], index=CategoricalIndex(["A", "B", "C"])) - assert s.loc["A"] == 1 - assert s.at["A"] == 1 df = DataFrame( [[1, 2], [3, 4], [5, 6]], index=CategoricalIndex(["A", "B", "C"]) ) - assert df.loc["B", 1] == 4 - assert df.at["B", 1] == 4 - - def test_indexing_with_category(self): - - # https://github.com/pandas-dev/pandas/issues/12564 - # consistent result if comparing as Dataframe - cat = DataFrame({"A": ["foo", "bar", "baz"]}) - exp = DataFrame({"A": [True, False, False]}) - - res = cat[["A"]] == "foo" - tm.assert_frame_equal(res, exp) - - cat["A"] = cat["A"].astype("category") + s = df[0] + assert s.loc["A"] == 1 + assert s.at["A"] == 1 - res = cat[["A"]] == "foo" - tm.assert_frame_equal(res, exp) + assert df.loc["B", 1] == 4 + assert df.at["B", 1] == 4 @pytest.mark.parametrize( "idx_values", @@ -501,7 +487,7 @@ def test_indexing_with_category(self): pd.timedelta_range(start="1d", periods=3).array, ], ) - def test_loc_with_non_string_categories(self, idx_values, ordered): + def test_loc_getitem_with_non_string_categories(self, idx_values, ordered): # GH-17569 cat_idx = CategoricalIndex(idx_values, ordered=ordered) df = DataFrame({"A": ["foo", "bar", "baz"]}, index=cat_idx) diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py index 7cad0f92b06a3..28a1098c10d9f 100644 --- a/pandas/tests/indexing/test_datetime.py +++ b/pandas/tests/indexing/test_datetime.py @@ -25,22 +25,17 @@ def test_indexing_with_datetime_tz(self): df.iloc[1, 1] = pd.NaT df.iloc[1, 2] = pd.NaT - # indexing - result = df.iloc[1] expected = Series( [Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT], index=list("ABC"), dtype="object", name=1, ) + + # indexing + result = df.iloc[1] tm.assert_series_equal(result, expected) result = df.loc[1] - expected = Series( - [Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT], - index=list("ABC"), - dtype="object", - name=1, - ) tm.assert_series_equal(result, expected) def test_indexing_fast_xs(self): @@ -224,12 +219,14 @@ def test_nanosecond_getitem_setitem_with_tz(self): expected = DataFrame(-1, index=index, columns=["a"]) tm.assert_frame_equal(result, expected) - def test_loc_setitem_with_existing_dst(self): + def test_loc_setitem_with_expansion_and_existing_dst(self): # GH 18308 start = Timestamp("2017-10-29 00:00:00+0200", tz="Europe/Madrid") end = Timestamp("2017-10-29 03:00:00+0100", tz="Europe/Madrid") ts = Timestamp("2016-10-10 03:00:00", tz="Europe/Madrid") idx = pd.date_range(start, end, closed="left", freq="H") + assert ts not in idx # i.e. 
result.loc setitem is with-expansion + result = DataFrame(index=idx, columns=["value"]) result.loc[ts, "value"] = 12 expected = DataFrame( diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index a4a7ef0860c15..efd99df9a5e4f 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -59,6 +59,9 @@ def test_setitem_ndarray_1d(self): ) tm.assert_series_equal(result, expected) + def test_setitem_ndarray_1d_2(self): + # GH5508 + # dtype getting changed? df = DataFrame(index=Index(np.arange(1, 11))) df["foo"] = np.zeros(10, dtype=np.float64) @@ -139,7 +142,7 @@ def test_inf_upcast(self): expected = pd.Float64Index([1, 2, np.inf]) tm.assert_index_equal(result, expected) - def test_inf_upcast_empty(self): + def test_loc_setitem_with_expasnion_inf_upcast_empty(self): # Test with np.inf in columns df = DataFrame() df.loc[0, 0] = 1 @@ -293,6 +296,8 @@ def test_dups_fancy_indexing2(self): with pytest.raises(KeyError, match="with any missing labels"): df.loc[:, ["A", "B", "C"]] + def test_dups_fancy_indexing3(self): + # GH 6504, multi-axis indexing df = DataFrame( np.random.randn(9, 2), index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=["a", "b"] @@ -506,6 +511,7 @@ def test_setitem_list(self): tm.assert_frame_equal(result, df) + def test_iloc_setitem_custom_object(self): # iloc with an object class TO: def __init__(self, value): @@ -551,6 +557,9 @@ def test_string_slice(self): with pytest.raises(KeyError, match="'2011'"): df.loc["2011", 0] + def test_string_slice_empty(self): + # GH 14424 + df = DataFrame() assert not df.index._is_all_dates with pytest.raises(KeyError, match="'2011'"): @@ -595,6 +604,7 @@ def test_astype_assignment(self): ) tm.assert_frame_equal(df, expected) + def test_astype_assignment_full_replacements(self): # full replacements / no nans df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]}) df.iloc[:, 0] = df["A"].astype(np.int64) @@ -658,9 +668,9 @@ class TestMisc: def test_float_index_to_mixed(self): df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)}) df["a"] = 10 - tm.assert_frame_equal( - DataFrame({0.0: df[0.0], 1.0: df[1.0], "a": [10] * 10}), df - ) + + expected = DataFrame({0.0: df[0.0], 1.0: df[1.0], "a": [10] * 10}) + tm.assert_frame_equal(expected, df) def test_float_index_non_scalar_assignment(self): df = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}, index=[1.0, 2.0, 3.0]) @@ -745,12 +755,10 @@ def assert_slices_equivalent(l_slc, i_slc): assert_slices_equivalent(SLC[idx[13] : idx[9] : -1], SLC[13:8:-1]) assert_slices_equivalent(SLC[idx[9] : idx[13] : -1], SLC[:0]) - def test_slice_with_zero_step_raises(self): - s = Series(np.arange(20), index=_mklbl("A", 20)) + def test_slice_with_zero_step_raises(self, indexer_sl): + ser = Series(np.arange(20), index=_mklbl("A", 20)) with pytest.raises(ValueError, match="slice step cannot be zero"): - s[::0] - with pytest.raises(ValueError, match="slice step cannot be zero"): - s.loc[::0] + indexer_sl(ser)[::0] def test_indexing_assignment_dict_already_exists(self): index = Index([-5, 0, 5], name="z") @@ -935,7 +943,7 @@ def test_none_coercion_mixed_dtypes(self): class TestDatetimelikeCoercion: def test_setitem_dt64_string_scalar(self, tz_naive_fixture, indexer_sli): - # dispatching _can_hold_element to underling DatetimeArray + # dispatching _can_hold_element to underlying DatetimeArray tz = tz_naive_fixture dti = date_range("2016-01-01", periods=3, tz=tz) diff --git a/pandas/tests/series/indexing/test_getitem.py 
b/pandas/tests/series/indexing/test_getitem.py index 740d029effc94..64d763f410666 100644 --- a/pandas/tests/series/indexing/test_getitem.py +++ b/pandas/tests/series/indexing/test_getitem.py @@ -15,6 +15,8 @@ timezones, ) +from pandas.core.dtypes.common import is_scalar + import pandas as pd from pandas import ( Categorical, @@ -533,3 +535,63 @@ def test_getitem_preserve_name(datetime_series): result = datetime_series[5:10] assert result.name == datetime_series.name + + +def test_getitem_with_integer_labels(): + # integer indexes, be careful + ser = Series(np.random.randn(10), index=list(range(0, 20, 2))) + inds = [0, 2, 5, 7, 8] + arr_inds = np.array([0, 2, 5, 7, 8]) + with pytest.raises(KeyError, match="with any missing labels"): + ser[inds] + + with pytest.raises(KeyError, match="with any missing labels"): + ser[arr_inds] + + +def test_getitem_missing(datetime_series): + # missing + d = datetime_series.index[0] - BDay() + msg = r"Timestamp\('1999-12-31 00:00:00', freq='B'\)" + with pytest.raises(KeyError, match=msg): + datetime_series[d] + + +def test_getitem_fancy(string_series, object_series): + slice1 = string_series[[1, 2, 3]] + slice2 = object_series[[1, 2, 3]] + assert string_series.index[2] == slice1.index[1] + assert object_series.index[2] == slice2.index[1] + assert string_series[2] == slice1[1] + assert object_series[2] == slice2[1] + + +def test_getitem_box_float64(datetime_series): + value = datetime_series[5] + assert isinstance(value, np.float64) + + +def test_getitem_unordered_dup(): + obj = Series(range(5), index=["c", "a", "a", "b", "b"]) + assert is_scalar(obj["c"]) + assert obj["c"] == 0 + + +def test_getitem_dups(): + ser = Series(range(5), index=["A", "A", "B", "C", "C"], dtype=np.int64) + expected = Series([3, 4], index=["C", "C"], dtype=np.int64) + result = ser["C"] + tm.assert_series_equal(result, expected) + + +def test_getitem_categorical_str(): + # GH#31765 + ser = Series(range(5), index=Categorical(["a", "b", "c", "a", "b"])) + result = ser["a"] + expected = ser.iloc[[0, 3]] + tm.assert_series_equal(result, expected) + + # Check the intermediate steps work as expected + with tm.assert_produces_warning(FutureWarning): + result = ser.index.get_value(ser, "a") + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index e047317acd24d..4ac50105f078c 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -5,8 +5,6 @@ import numpy as np import pytest -from pandas.core.dtypes.common import is_scalar - import pandas as pd from pandas import ( Categorical, @@ -22,8 +20,6 @@ ) import pandas._testing as tm -from pandas.tseries.offsets import BDay - def test_basic_indexing(): s = Series(np.random.randn(5), index=["a", "b", "a", "a", "b"]) @@ -58,18 +54,6 @@ def test_basic_getitem_with_labels(datetime_series): tm.assert_series_equal(result, expected) -def test_basic_getitem_with_integer_labels(): - # integer indexes, be careful - ser = Series(np.random.randn(10), index=list(range(0, 20, 2))) - inds = [0, 2, 5, 7, 8] - arr_inds = np.array([0, 2, 5, 7, 8]) - with pytest.raises(KeyError, match="with any missing labels"): - ser[inds] - - with pytest.raises(KeyError, match="with any missing labels"): - ser[arr_inds] - - def test_basic_getitem_dt64tz_values(): # GH12089 @@ -98,24 +82,7 @@ def test_getitem_setitem_ellipsis(): assert (result == 5).all() -def test_getitem_missing(datetime_series): - # missing - d = 
datetime_series.index[0] - BDay() - msg = r"Timestamp\('1999-12-31 00:00:00', freq='B'\)" - with pytest.raises(KeyError, match=msg): - datetime_series[d] - - -def test_getitem_fancy(string_series, object_series): - slice1 = string_series[[1, 2, 3]] - slice2 = object_series[[1, 2, 3]] - assert string_series.index[2] == slice1.index[1] - assert object_series.index[2] == slice2.index[1] - assert string_series[2] == slice1[1] - assert object_series[2] == slice2[1] - - -def test_type_promotion(): +def test_setitem_with_expansion_type_promotion(): # GH12599 s = Series(dtype=object) s["a"] = Timestamp("2016-01-01") @@ -157,11 +124,6 @@ def test_getitem_setitem_integers(): tm.assert_almost_equal(s["a"], 5) -def test_getitem_box_float64(datetime_series): - value = datetime_series[5] - assert isinstance(value, np.float64) - - def test_series_box_timestamp(): rng = pd.date_range("20090415", "20090519", freq="B") ser = Series(rng) @@ -189,49 +151,26 @@ def test_series_box_timedelta(): assert isinstance(ser.iloc[4], Timedelta) -def test_getitem_ambiguous_keyerror(): - s = Series(range(10), index=list(range(0, 20, 2))) +def test_getitem_ambiguous_keyerror(indexer_sl): + ser = Series(range(10), index=list(range(0, 20, 2))) with pytest.raises(KeyError, match=r"^1$"): - s[1] - with pytest.raises(KeyError, match=r"^1$"): - s.loc[1] - + indexer_sl(ser)[1] -def test_getitem_unordered_dup(): - obj = Series(range(5), index=["c", "a", "a", "b", "b"]) - assert is_scalar(obj["c"]) - assert obj["c"] == 0 - -def test_getitem_dups_with_missing(): +def test_getitem_dups_with_missing(indexer_sl): # breaks reindex, so need to use .loc internally # GH 4246 - s = Series([1, 2, 3, 4], ["foo", "bar", "foo", "bah"]) - with pytest.raises(KeyError, match="with any missing labels"): - s.loc[["foo", "bar", "bah", "bam"]] - + ser = Series([1, 2, 3, 4], ["foo", "bar", "foo", "bah"]) with pytest.raises(KeyError, match="with any missing labels"): - s[["foo", "bar", "bah", "bam"]] + indexer_sl(ser)[["foo", "bar", "bah", "bam"]] -def test_getitem_dups(): - s = Series(range(5), index=["A", "A", "B", "C", "C"], dtype=np.int64) - expected = Series([3, 4], index=["C", "C"], dtype=np.int64) - result = s["C"] - tm.assert_series_equal(result, expected) - - -def test_setitem_ambiguous_keyerror(): +def test_setitem_ambiguous_keyerror(indexer_sl): s = Series(range(10), index=list(range(0, 20, 2))) # equivalent of an append s2 = s.copy() - s2[1] = 5 - expected = s.append(Series([5], index=[1])) - tm.assert_series_equal(s2, expected) - - s2 = s.copy() - s2.loc[1] = 5 + indexer_sl(s2)[1] = 5 expected = s.append(Series([5], index=[1])) tm.assert_series_equal(s2, expected) @@ -314,13 +253,10 @@ def test_basic_getitem_setitem_corner(datetime_series): @pytest.mark.parametrize("tz", ["US/Eastern", "UTC", "Asia/Tokyo"]) -def test_setitem_with_tz(tz): +def test_setitem_with_tz(tz, indexer_sli): orig = Series(pd.date_range("2016-01-01", freq="H", periods=3, tz=tz)) assert orig.dtype == f"datetime64[ns, {tz}]" - # scalar - s = orig.copy() - s[1] = Timestamp("2011-01-01", tz=tz) exp = Series( [ Timestamp("2016-01-01 00:00", tz=tz), @@ -328,15 +264,11 @@ def test_setitem_with_tz(tz): Timestamp("2016-01-01 02:00", tz=tz), ] ) - tm.assert_series_equal(s, exp) - s = orig.copy() - s.loc[1] = Timestamp("2011-01-01", tz=tz) - tm.assert_series_equal(s, exp) - - s = orig.copy() - s.iloc[1] = Timestamp("2011-01-01", tz=tz) - tm.assert_series_equal(s, exp) + # scalar + ser = orig.copy() + indexer_sli(ser)[1] = Timestamp("2011-01-01", tz=tz) + 
tm.assert_series_equal(ser, exp) # vector vals = Series( @@ -345,7 +277,6 @@ def test_setitem_with_tz(tz): ) assert vals.dtype == f"datetime64[ns, {tz}]" - s[[1, 2]] = vals exp = Series( [ Timestamp("2016-01-01 00:00", tz=tz), @@ -353,26 +284,18 @@ def test_setitem_with_tz(tz): Timestamp("2012-01-01 00:00", tz=tz), ] ) - tm.assert_series_equal(s, exp) - s = orig.copy() - s.loc[[1, 2]] = vals - tm.assert_series_equal(s, exp) - - s = orig.copy() - s.iloc[[1, 2]] = vals - tm.assert_series_equal(s, exp) + ser = orig.copy() + indexer_sli(ser)[[1, 2]] = vals + tm.assert_series_equal(ser, exp) -def test_setitem_with_tz_dst(): +def test_setitem_with_tz_dst(indexer_sli): # GH XXX TODO: fill in GH ref tz = "US/Eastern" orig = Series(pd.date_range("2016-11-06", freq="H", periods=3, tz=tz)) assert orig.dtype == f"datetime64[ns, {tz}]" - # scalar - s = orig.copy() - s[1] = Timestamp("2011-01-01", tz=tz) exp = Series( [ Timestamp("2016-11-06 00:00-04:00", tz=tz), @@ -380,15 +303,11 @@ def test_setitem_with_tz_dst(): Timestamp("2016-11-06 01:00-05:00", tz=tz), ] ) - tm.assert_series_equal(s, exp) - s = orig.copy() - s.loc[1] = Timestamp("2011-01-01", tz=tz) - tm.assert_series_equal(s, exp) - - s = orig.copy() - s.iloc[1] = Timestamp("2011-01-01", tz=tz) - tm.assert_series_equal(s, exp) + # scalar + ser = orig.copy() + indexer_sli(ser)[1] = Timestamp("2011-01-01", tz=tz) + tm.assert_series_equal(ser, exp) # vector vals = Series( @@ -397,7 +316,6 @@ def test_setitem_with_tz_dst(): ) assert vals.dtype == f"datetime64[ns, {tz}]" - s[[1, 2]] = vals exp = Series( [ Timestamp("2016-11-06 00:00", tz=tz), @@ -405,15 +323,10 @@ def test_setitem_with_tz_dst(): Timestamp("2012-01-01 00:00", tz=tz), ] ) - tm.assert_series_equal(s, exp) - s = orig.copy() - s.loc[[1, 2]] = vals - tm.assert_series_equal(s, exp) - - s = orig.copy() - s.iloc[[1, 2]] = vals - tm.assert_series_equal(s, exp) + ser = orig.copy() + indexer_sli(ser)[[1, 2]] = vals + tm.assert_series_equal(ser, exp) def test_categorical_assigning_ops(): @@ -453,19 +366,6 @@ def test_setitem_nan_into_categorical(): tm.assert_series_equal(ser, exp) -def test_getitem_categorical_str(): - # GH#31765 - ser = Series(range(5), index=Categorical(["a", "b", "c", "a", "b"])) - result = ser["a"] - expected = ser.iloc[[0, 3]] - tm.assert_series_equal(result, expected) - - # Check the intermediate steps work as expected - with tm.assert_produces_warning(FutureWarning): - result = ser.index.get_value(ser, "a") - tm.assert_series_equal(result, expected) - - def test_slice(string_series, object_series): numSlice = string_series[10:20] numSliceEnd = string_series[-10:]
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/39832
2021-02-16T04:03:36Z
2021-02-16T23:26:34Z
2021-02-16T23:26:33Z
2021-02-16T23:34:50Z
TST/REF: share Series setitem tests
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index e047317acd24d..7889d46f66c99 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -542,56 +542,6 @@ def test_setitem_td64_non_nano(): tm.assert_series_equal(ser, expected) -@pytest.mark.parametrize( - "nat_val", - [ - pd.NaT, - np.timedelta64("NaT", "ns"), - np.datetime64("NaT", "ns"), - ], -) -@pytest.mark.parametrize("tz", [None, "UTC"]) -def test_dt64_series_assign_nat(nat_val, tz, indexer_sli): - # some nat-like values should be cast to datetime64 when inserting - # into a datetime64 series. Others should coerce to object - # and retain their dtypes. - dti = pd.date_range("2016-01-01", periods=3, tz=tz) - base = Series(dti) - expected = Series([pd.NaT] + list(dti[1:]), dtype=dti.dtype) - - should_cast = nat_val is pd.NaT or base.dtype == nat_val.dtype - if not should_cast: - expected = expected.astype(object) - - ser = base.copy(deep=True) - indexer_sli(ser)[0] = nat_val - tm.assert_series_equal(ser, expected) - - -@pytest.mark.parametrize( - "nat_val", - [ - pd.NaT, - np.timedelta64("NaT", "ns"), - np.datetime64("NaT", "ns"), - ], -) -def test_td64_series_assign_nat(nat_val, indexer_sli): - # some nat-like values should be cast to timedelta64 when inserting - # into a timedelta64 series. Others should coerce to object - # and retain their dtypes. - base = Series([0, 1, 2], dtype="m8[ns]") - expected = Series([pd.NaT, 1, 2], dtype="m8[ns]") - - should_cast = nat_val is pd.NaT or base.dtype == nat_val.dtype - if not should_cast: - expected = expected.astype(object) - - ser = base.copy(deep=True) - indexer_sli(ser)[0] = nat_val - tm.assert_series_equal(ser, expected) - - def test_underlying_data_conversion(): # GH 4080 df = DataFrame({c: [1, 2, 3] for c in ["a", "b", "c"]}) diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 36948c3dc05f3..18a870b1b0b9c 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -167,15 +167,6 @@ def test_setitem_boolean_python_list(self, func): expected = Series(["a", "b", "c"]) tm.assert_series_equal(ser, expected) - @pytest.mark.parametrize("value", [None, NaT, np.nan]) - def test_setitem_boolean_td64_values_cast_na(self, value): - # GH#18586 - series = Series([0, 1, 2], dtype="timedelta64[ns]") - mask = series == series[0] - series[mask] = value - expected = Series([NaT, 1, 2], dtype="timedelta64[ns]") - tm.assert_series_equal(series, expected) - def test_setitem_boolean_nullable_int_types(self, any_nullable_numeric_dtype): # GH: 26468 ser = Series([5, 6, 7, 8], dtype=any_nullable_numeric_dtype) @@ -640,62 +631,43 @@ def is_inplace(self): return True -class TestSetitemNATimedelta64Dtype(SetitemCastingEquivalents): - # some nat-like values should be cast to timedelta64 when inserting - # into a timedelta64 series. Others should coerce to object - # and retain their dtypes. - - @pytest.fixture - def obj(self): - return Series([0, 1, 2], dtype="m8[ns]") +class TestSetitemNADatetimeLikeDtype(SetitemCastingEquivalents): + # some nat-like values should be cast to datetime64/timedelta64 when + # inserting into a datetime64/timedelta64 series. Others should coerce + # to object and retain their dtypes. 
+ # GH#18586 for td64 and boolean mask case @pytest.fixture( - params=[NaT, np.timedelta64("NaT", "ns"), np.datetime64("NaT", "ns")] + params=["m8[ns]", "M8[ns]", "datetime64[ns, UTC]", "datetime64[ns, US/Central]"] ) - def val(self, request): + def dtype(self, request): return request.param @pytest.fixture - def is_inplace(self, val): - # cast to object iff val is datetime64("NaT") - return val is NaT or val.dtype.kind == "m" - - @pytest.fixture - def expected(self, obj, val, is_inplace): - dtype = obj.dtype if is_inplace else object - expected = Series([val] + list(obj[1:]), dtype=dtype) - return expected - - @pytest.fixture - def key(self): - return 0 - - -class TestSetitemNADatetime64Dtype(SetitemCastingEquivalents): - # some nat-like values should be cast to datetime64 when inserting - # into a datetime64 series. Others should coerce to object - # and retain their dtypes. - - @pytest.fixture(params=[None, "UTC", "US/Central"]) - def obj(self, request): - tz = request.param - dti = date_range("2016-01-01", periods=3, tz=tz) - return Series(dti) + def obj(self, dtype): + i8vals = date_range("2016-01-01", periods=3).asi8 + idx = Index(i8vals, dtype=dtype) + assert idx.dtype == dtype + return Series(idx) @pytest.fixture( - params=[NaT, np.timedelta64("NaT", "ns"), np.datetime64("NaT", "ns")] + params=[ + None, + np.nan, + NaT, + np.timedelta64("NaT", "ns"), + np.datetime64("NaT", "ns"), + ] ) def val(self, request): return request.param @pytest.fixture def is_inplace(self, val, obj): - if obj._values.tz is None: - # cast to object iff val is timedelta64("NaT") - return val is NaT or val.dtype.kind == "M" - - # otherwise we have to exclude tznaive dt64("NaT") - return val is NaT + # td64 -> cast to object iff val is datetime64("NaT") + # dt64 -> cast to object iff val is timedelta64("NaT") + # dt64tz -> cast to object with anything _but_ NaT + return val is NaT or val is None or val is np.nan or obj.dtype == val.dtype @pytest.fixture def expected(self, obj, val, is_inplace):
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
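The sharing in the diff above relies on the `indexer_sli` fixture; a minimal sketch of that idea (simplified stand-in, not the actual conftest implementation) is that one test body is parametrized over the three setitem entry points:

```python
import pandas as pd
import pytest

# Parametrize over ser[key], ser.loc[key], and ser.iloc[key] so a single
# test body covers all three setitem paths.
@pytest.fixture(params=[lambda s: s, lambda s: s.loc, lambda s: s.iloc])
def indexer_sli(request):
    return request.param

def test_setitem_scalar(indexer_sli):
    ser = pd.Series([1, 2, 3])
    indexer_sli(ser)[1] = 10
    assert ser[1] == 10
```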
https://api.github.com/repos/pandas-dev/pandas/pulls/39831
2021-02-16T02:34:46Z
2021-02-16T18:49:33Z
2021-02-16T18:49:33Z
2021-02-16T18:51:26Z
BUG: DataFrame.append broken on master
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index a2c930f6d9b22..01d8cde3e9af2 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -291,9 +291,7 @@ def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike: if len(values) and values[0] is None: fill_value = None - if is_datetime64tz_dtype(blk_dtype) or is_datetime64tz_dtype( - empty_dtype - ): + if is_datetime64tz_dtype(empty_dtype): # TODO(EA2D): special case unneeded with 2D EAs i8values = np.full(self.shape[1], fill_value.value) return DatetimeArray(i8values, dtype=empty_dtype) @@ -302,9 +300,8 @@ def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike: elif is_extension_array_dtype(blk_dtype): pass elif is_extension_array_dtype(empty_dtype): - missing_arr = empty_dtype.construct_array_type()._from_sequence( - [], dtype=empty_dtype - ) + cls = empty_dtype.construct_array_type() + missing_arr = cls._from_sequence([], dtype=empty_dtype) ncols, nrows = self.shape assert ncols == 1, ncols empty_arr = -1 * np.ones((nrows,), dtype=np.intp)
Should get the CI to green.
https://api.github.com/repos/pandas-dev/pandas/pulls/39829
2021-02-16T02:05:55Z
2021-02-16T04:02:07Z
2021-02-16T04:02:07Z
2021-02-16T04:02:37Z
BUG: PandasArray._from_sequence with list-of-tuples
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index 9999a9ed411d8..8a8c4be012d99 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -10,6 +10,7 @@ from pandas._typing import Dtype, NpDtype, Scalar from pandas.compat.numpy import function as nv +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike from pandas.core.dtypes.dtypes import PandasDtype from pandas.core.dtypes.missing import isna @@ -86,6 +87,14 @@ def _from_sequence( dtype = dtype._dtype result = np.asarray(scalars, dtype=dtype) + if ( + result.ndim > 1 + and not hasattr(scalars, "dtype") + and (dtype is None or dtype == object) + ): + # e.g. list-of-tuples + result = construct_1d_object_array_from_listlike(scalars) + if copy and result is scalars: result = result.copy() return cls(result) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 3c27e34dcbcf6..0bdfb7ffb20d3 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -182,10 +182,17 @@ def _check_ndim(self, values, ndim): if ndim is None: ndim = values.ndim - if self._validate_ndim and values.ndim != ndim: + if self._validate_ndim: + if values.ndim != ndim: + raise ValueError( + "Wrong number of dimensions. " + f"values.ndim != ndim [{values.ndim} != {ndim}]" + ) + elif values.ndim > ndim: + # ExtensionBlock raise ValueError( "Wrong number of dimensions. " - f"values.ndim != ndim [{values.ndim} != {ndim}]" + f"values.ndim > ndim [{values.ndim} > {ndim}]" ) return ndim @@ -2178,28 +2185,6 @@ def fillna( value, limit=limit, inplace=inplace, downcast=downcast ) - def _check_ndim(self, values, ndim): - """ - ndim inference and validation. - - This is overridden by the DatetimeTZBlock to check the case of 2D - data (values.ndim == 2), which should only be allowed if ndim is - also 2. - The case of 1D array is still allowed with both ndim of 1 or 2, as - if the case for other EAs. Therefore, we are only checking - `values.ndim > ndim` instead of `values.ndim != ndim` as for - consolidated blocks. - """ - if ndim is None: - ndim = values.ndim - - if values.ndim > ndim: - raise ValueError( - "Wrong number of dimensions. " - f"values.ndim != ndim [{values.ndim} != {ndim}]" - ) - return ndim - class TimeDeltaBlock(DatetimeLikeBlockMixin): __slots__ = () diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py index a5b54bc153f5d..1e876d137319d 100644 --- a/pandas/tests/extension/test_numpy.py +++ b/pandas/tests/extension/test_numpy.py @@ -186,11 +186,6 @@ def test_getitem_scalar(self, data): # AssertionError super().test_getitem_scalar(data) - @skip_nested - def test_take_series(self, data): - # ValueError: PandasArray must be 1-dimensional. - super().test_take_series(data) - class TestGroupby(BaseNumPyTests, base.BaseGroupbyTests): def test_groupby_extension_apply( @@ -219,13 +214,6 @@ def test_shift_fill_value(self, data): # np.array shape inference. Shift implementation fails. super().test_shift_fill_value(data) - @skip_nested - @pytest.mark.parametrize("box", [pd.Series, lambda x: x]) - @pytest.mark.parametrize("method", [lambda x: x.unique(), pd.unique]) - def test_unique(self, data, box, method): - # Fails creating expected - super().test_unique(data, box, method) - @skip_nested def test_fillna_copy_frame(self, data_missing): # The "scalar" for this array isn't a scalar. @@ -241,31 +229,10 @@ def test_searchsorted(self, data_for_sorting, as_series): # Test setup fails. 
super().test_searchsorted(data_for_sorting, as_series) - @skip_nested - def test_where_series(self, data, na_value, as_frame): - # Test setup fails. - super().test_where_series(data, na_value, as_frame) - - @pytest.mark.parametrize("repeats", [0, 1, 2, [1, 2, 3]]) - def test_repeat(self, data, repeats, as_series, use_numpy, request): - if data.dtype.numpy_dtype == object and repeats != 0: - mark = pytest.mark.xfail(reason="mask shapes mismatch") - request.node.add_marker(mark) - super().test_repeat(data, repeats, as_series, use_numpy) - @pytest.mark.xfail(reason="PandasArray.diff may fail on dtype") def test_diff(self, data, periods): return super().test_diff(data, periods) - @pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame]) - def test_equals(self, data, na_value, as_series, box, request): - # Fails creating with _from_sequence - if box is pd.DataFrame and data.dtype.numpy_dtype == object: - mark = pytest.mark.xfail(reason="AssertionError in _get_same_shape_values") - request.node.add_marker(mark) - - super().test_equals(data, na_value, as_series, box) - class TestArithmetics(BaseNumPyTests, base.BaseArithmeticOpsTests): divmod_exc = None @@ -286,8 +253,11 @@ def test_divmod_series_array(self, data): def test_arith_series_with_scalar(self, data, all_arithmetic_operators): super().test_arith_series_with_scalar(data, all_arithmetic_operators) - @skip_nested - def test_arith_series_with_array(self, data, all_arithmetic_operators): + def test_arith_series_with_array(self, data, all_arithmetic_operators, request): + opname = all_arithmetic_operators + if data.dtype.numpy_dtype == object and opname not in ["__add__", "__radd__"]: + mark = pytest.mark.xfail(reason="Fails for object dtype") + request.node.add_marker(mark) super().test_arith_series_with_array(data, all_arithmetic_operators) @skip_nested @@ -322,11 +292,6 @@ def test_fillna_scalar(self, data_missing): # Non-scalar "scalar" values. super().test_fillna_scalar(data_missing) - @skip_nested - def test_fillna_series_method(self, data_missing, fillna_method): - # Non-scalar "scalar" values. - super().test_fillna_series_method(data_missing, fillna_method) - @skip_nested def test_fillna_series(self, data_missing): # Non-scalar "scalar" values. @@ -355,20 +320,6 @@ def test_merge(self, data, na_value): # Fails creating expected (key column becomes a PandasDtype because) super().test_merge(data, na_value) - @skip_nested - def test_merge_on_extension_array(self, data): - # Fails creating expected - super().test_merge_on_extension_array(data) - - @skip_nested - def test_merge_on_extension_array_duplicates(self, data): - # Fails creating expected - super().test_merge_on_extension_array_duplicates(data) - - @skip_nested - def test_transpose_frame(self, data): - super().test_transpose_frame(data) - class TestSetitem(BaseNumPyTests, base.BaseSetitemTests): @skip_nested
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
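A minimal sketch of the fixed behavior (illustrative, and using the private `_from_sequence` constructor): a list of tuples should now produce a 1-D object array of tuples rather than a 2-D array that the block machinery rejects.

```python
from pandas.arrays import PandasArray

# Before the fix, np.asarray turned this into a (2, 2) integer array and
# construction failed; now it becomes a 1-D object array of tuples.
arr = PandasArray._from_sequence([(1, 2), (3, 4)])
print(arr.ndim)    # 1
print(arr.dtype)   # object
```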
https://api.github.com/repos/pandas-dev/pandas/pulls/39828
2021-02-15T23:15:00Z
2021-02-17T01:33:01Z
2021-02-17T01:33:01Z
2021-02-17T01:45:36Z
Backport PR #39800 on branch 1.2.x (Regression in to_excel when setting duplicate column names)
diff --git a/doc/source/whatsnew/v1.2.3.rst b/doc/source/whatsnew/v1.2.3.rst index e675b3ea921d1..4231b6d94b1b9 100644 --- a/doc/source/whatsnew/v1.2.3.rst +++ b/doc/source/whatsnew/v1.2.3.rst @@ -15,7 +15,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ -- +- Fixed regression in :func:`pandas.to_excel` raising ``KeyError`` when giving duplicate columns with ``columns`` attribute (:issue:`39695`) - .. --------------------------------------------------------------------------- diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index 0cad67169feff..6e77406948202 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -465,7 +465,7 @@ def __init__( if not len(Index(cols).intersection(df.columns)): raise KeyError("passes columns are not ALL present dataframe") - if len(Index(cols).intersection(df.columns)) != len(cols): + if len(Index(cols).intersection(df.columns)) != len(set(cols)): # Deprecated in GH#17295, enforced in 1.0.0 raise KeyError("Not all names specified in 'columns' are found") diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py index af0de05965398..6d0684d4d1315 100644 --- a/pandas/tests/io/excel/test_writers.py +++ b/pandas/tests/io/excel/test_writers.py @@ -1295,6 +1295,15 @@ def test_raise_when_saving_timezones(self, dtype, tz_aware_fixture, path): with pytest.raises(ValueError, match="Excel does not support"): df.to_excel(path) + def test_excel_duplicate_columns_with_names(self, path): + # GH#39695 + df = DataFrame({"A": [0, 1], "B": [10, 11]}) + df.to_excel(path, columns=["A", "B", "A"], index=False) + + result = pd.read_excel(path) + expected = DataFrame([[0, 10, 0], [1, 11, 1]], columns=["A", "B", "A.1"]) + tm.assert_frame_equal(result, expected) + class TestExcelWriterEngineTests: @pytest.mark.parametrize(
Backport PR #39800: Regression in to_excel when setting duplicate column names
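The regression is easiest to see with a round-trip, mirroring the added test (sketch; assumes an Excel engine such as openpyxl is installed):

```python
import pandas as pd

df = pd.DataFrame({"A": [0, 1], "B": [10, 11]})
# The length of the intersection is now compared against len(set(cols)),
# so the duplicated "A" no longer triggers a spurious KeyError.
df.to_excel("out.xlsx", columns=["A", "B", "A"], index=False)
print(pd.read_excel("out.xlsx"))  # columns: A, B, A.1
```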
https://api.github.com/repos/pandas-dev/pandas/pulls/39827
2021-02-15T22:43:06Z
2021-02-16T09:41:00Z
2021-02-16T09:41:00Z
2021-02-16T09:41:00Z
PERF: use arr.size instead of np.prod(arr.shape) in _can_use_numexpr
diff --git a/pandas/core/array_algos/transforms.py b/pandas/core/array_algos/transforms.py index 371425f325d76..1dde9b221a90b 100644 --- a/pandas/core/array_algos/transforms.py +++ b/pandas/core/array_algos/transforms.py @@ -19,7 +19,7 @@ def shift(values: np.ndarray, periods: int, axis: int, fill_value) -> np.ndarray new_values = new_values.T axis = new_values.ndim - axis - 1 - if np.prod(new_values.shape): + if new_values.size: new_values = np.roll(new_values, ensure_platform_int(periods), axis=axis) axis_indexer = [slice(None)] * values.ndim diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 087b7f39e3374..ca62e8a31be7a 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -76,7 +76,7 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check): if op_str is not None: # required min elements (otherwise we are adding overhead) - if np.prod(a.shape) > _MIN_ELEMENTS: + if a.size > _MIN_ELEMENTS: # check for dtype compatibility dtypes: Set[str] = set() for o in [a, b]: diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index e27c519304e2e..2fc6aa9842b73 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1497,7 +1497,7 @@ def maybe_cast_to_datetime(value, dtype: Optional[DtypeObj]): value = iNaT # we have an array of datetime or timedeltas & nulls - elif np.prod(value.shape) or not is_dtype_equal(value.dtype, dtype): + elif value.size or not is_dtype_equal(value.dtype, dtype): _disallow_mismatched_datetimelike(value, dtype) try: diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index ef645313de614..657b10c46fed7 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -429,7 +429,7 @@ def array_equivalent( # NaNs can occur in float and complex arrays. if is_float_dtype(left.dtype) or is_complex_dtype(left.dtype): - if not (np.prod(left.shape) and np.prod(right.shape)): + if not (left.size and right.size): return True return ((left == right) | (isna(left) & isna(right))).all()
xref https://github.com/pandas-dev/pandas/pull/39772

Using `np.prod` adds considerable overhead, and I *think* it is always equivalent to `.size`?

```
In [1]: arr = np.random.randn(3, 2)

In [2]: %timeit arr.size
42.8 ns ± 0.628 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)

In [3]: %timeit np.prod(arr.shape)
6.63 µs ± 344 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
```
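For what it's worth, a quick sanity check (not part of the diff) that the two expressions also agree on the edge cases, since `size` is defined as the product of the shape:

```python
import numpy as np

for arr in [
    np.random.randn(3, 2),  # ordinary 2-D array
    np.empty((0, 5)),       # empty: prod((0, 5)) == 0 == size
    np.array(1.0),          # 0-d: prod(()) == 1 == size
]:
    assert arr.size == np.prod(arr.shape)
```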
https://api.github.com/repos/pandas-dev/pandas/pulls/39825
2021-02-15T19:19:54Z
2021-02-16T22:55:57Z
2021-02-16T22:55:57Z
2021-02-17T07:09:29Z
REF: remove unnecessary Block attrs
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 06bf2e5d7b18e..5fae48ae5d4b2 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -99,9 +99,6 @@ class Block(PandasObject): __slots__ = ["_mgr_locs", "values", "ndim"] is_numeric = False is_float = False - is_datetime = False - is_datetimetz = False - is_timedelta = False is_bool = False is_object = False is_extension = False @@ -213,11 +210,6 @@ def is_view(self) -> bool: def is_categorical(self) -> bool: return self._holder is Categorical - @property - def is_datelike(self) -> bool: - """ return True if I am a non-datelike """ - return self.is_datetime or self.is_timedelta - def external_values(self): """ The array that Series.values returns (public attribute). @@ -547,7 +539,8 @@ def _maybe_downcast(self, blocks: List[Block], downcast=None) -> List[Block]: # no need to downcast our float # unless indicated - if downcast is None and (self.is_float or self.is_datelike): + if downcast is None and self.dtype.kind in ["f", "m", "M"]: + # TODO: complex? more generally, self._can_hold_na? return blocks return extend_blocks([b.downcast(downcast) for b in blocks]) @@ -634,13 +627,12 @@ def astype(self, dtype, copy: bool = False, errors: str = "raise"): raise newb = self.make_block(new_values) - if newb.is_numeric and self.is_numeric: - if newb.shape != self.shape: - raise TypeError( - f"cannot set astype for copy = [{copy}] for dtype " - f"({self.dtype.name} [{self.shape}]) to different shape " - f"({newb.dtype.name} [{newb.shape}])" - ) + if newb.shape != self.shape: + raise TypeError( + f"cannot set astype for copy = [{copy}] for dtype " + f"({self.dtype.name} [{self.shape}]) to different shape " + f"({newb.dtype.name} [{newb.shape}])" + ) return newb def _astype(self, dtype: DtypeObj, copy: bool) -> ArrayLike: @@ -2089,7 +2081,6 @@ class DatetimeTZBlock(ExtensionBlock, DatetimeBlock): values: DatetimeArray __slots__ = () - is_datetimetz = True is_extension = True _holder = DatetimeArray @@ -2167,7 +2158,7 @@ def get_values(self, dtype: Optional[DtypeObj] = None) -> np.ndarray: def external_values(self): # NB: this is different from np.asarray(self.values), since that # return an object-dtype ndarray of Timestamps. - # avoid FutureWarning in .astype in casting from dt64t to dt64 + # Avoid FutureWarning in .astype in casting from dt64tz to dt64 return self.values._data def fillna( @@ -2208,7 +2199,6 @@ def _check_ndim(self, values, ndim): class TimeDeltaBlock(DatetimeLikeBlockMixin): __slots__ = () - is_timedelta = True _can_hold_na = True is_numeric = False _holder = TimedeltaArray diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 01d8cde3e9af2..c65d104021a47 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -12,7 +12,6 @@ from pandas.core.dtypes.cast import ensure_dtype_can_hold_na, find_common_type from pandas.core.dtypes.common import ( - is_categorical_dtype, is_datetime64tz_dtype, is_dtype_equal, is_extension_array_dtype, @@ -295,8 +294,6 @@ def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike: # TODO(EA2D): special case unneeded with 2D EAs i8values = np.full(self.shape[1], fill_value.value) return DatetimeArray(i8values, dtype=empty_dtype) - elif is_categorical_dtype(blk_dtype): - pass elif is_extension_array_dtype(blk_dtype): pass elif is_extension_array_dtype(empty_dtype):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
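The pattern replacing the removed boolean flags is a plain `dtype.kind` check, e.g. (illustrative):

```python
import numpy as np

# "f" = float, "m" = timedelta64, "M" = datetime64 -- the kinds the old
# is_float / is_timedelta / is_datetime flags stood for.
for name in ["float64", "timedelta64[ns]", "datetime64[ns]"]:
    print(name, "->", np.dtype(name).kind)
```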
https://api.github.com/repos/pandas-dev/pandas/pulls/39824
2021-02-15T18:52:09Z
2021-02-16T16:52:18Z
2021-02-16T16:52:18Z
2021-02-16T17:55:16Z
BUG: Fixes plotting with nullable integers (#32073)
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index ac930b3e77785..5c4472749e11f 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -746,6 +746,7 @@ Plotting indexed by a :class:`.TimedeltaIndex` with a fixed frequency and the x-axis lower limit was greater than the upper limit (:issue:`37454`) - Bug in :meth:`.DataFrameGroupBy.boxplot` when ``subplots=False`` would raise a ``KeyError`` (:issue:`16748`) - Bug in :meth:`DataFrame.plot` and :meth:`Series.plot` was overwriting matplotlib's shared y axes behaviour when no ``sharey`` parameter was passed (:issue:`37942`) +- Bug in :meth:`DataFrame.plot` was raising a ``TypeError`` with ``ExtensionDtype`` columns (:issue:`32073`) Groupby/resample/rolling diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index bef2d82706ffc..1a22e5629ebe8 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -9,9 +9,12 @@ from pandas.util._decorators import cache_readonly from pandas.core.dtypes.common import ( + is_extension_array_dtype, is_float, + is_float_dtype, is_hashable, is_integer, + is_integer_dtype, is_iterator, is_list_like, is_number, @@ -383,6 +386,20 @@ def result(self): else: return self.axes[0] + def _convert_to_ndarray(self, data): + # GH32073: cast to float if values contain nulled integers + if ( + is_integer_dtype(data.dtype) or is_float_dtype(data.dtype) + ) and is_extension_array_dtype(data.dtype): + return data.to_numpy(dtype="float", na_value=np.nan) + + # GH25587: cast ExtensionArray of pandas (IntegerArray, etc.) to + # np.ndarray before plot. + if len(data) > 0: + return np.asarray(data) + + return data + def _compute_plot_data(self): data = self.data @@ -423,13 +440,7 @@ def _compute_plot_data(self): if is_empty: raise TypeError("no numeric data to plot") - # GH25587: cast ExtensionArray of pandas (IntegerArray, etc.) to - # np.ndarray before plot. - numeric_data = numeric_data.copy() - for col in numeric_data: - numeric_data[col] = np.asarray(numeric_data[col]) - - self.data = numeric_data + self.data = numeric_data.apply(self._convert_to_ndarray) def _make_plot(self): raise AbstractMethodError(self) diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py index dc7478fe6ef4a..c66334065ea63 100644 --- a/pandas/tests/plotting/frame/test_frame.py +++ b/pandas/tests/plotting/frame/test_frame.py @@ -149,6 +149,28 @@ def test_plot(self): result = ax.axes assert result is axes[0] + def test_nullable_int_plot(self): + # GH 32073 + dates = ["2008", "2009", None, "2011", "2012"] + df = DataFrame( + { + "A": [1, 2, 3, 4, 5], + "B": [1.0, 2.0, 3.0, 4.0, 5.0], + "C": [7, 5, np.nan, 3, 2], + "D": pd.to_datetime(dates, format="%Y"), + "E": pd.to_datetime(dates, format="%Y", utc=True), + }, + dtype=np.int64, + ) + + _check_plot_works(df.plot, x="A", y="B") + _check_plot_works(df[["A", "B"]].plot, x="A", y="B") + _check_plot_works(df[["C", "A"]].plot, x="C", y="A") # nullable value on x-axis + _check_plot_works(df[["A", "C"]].plot, x="A", y="C") + _check_plot_works(df[["B", "C"]].plot, x="B", y="C") + _check_plot_works(df[["A", "D"]].plot, x="A", y="D") + _check_plot_works(df[["A", "E"]].plot, x="A", y="E") + def test_integer_array_plot(self): # GH 25587 arr = integer_array([1, 2, 3, 4], dtype="UInt32")
- [x] closes #32073 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
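A minimal repro of the fixed behavior (sketch; assumes matplotlib is installed):

```python
import pandas as pd

df = pd.DataFrame({
    "x": pd.array([1, 2, None, 4], dtype="Int64"),  # nullable integer
    "y": [1.0, 2.0, 3.0, 4.0],
})
# The nullable column is now converted via to_numpy(dtype="float",
# na_value=np.nan) before plotting, so this no longer raises TypeError.
ax = df.plot(x="x", y="y")
```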
https://api.github.com/repos/pandas-dev/pandas/pulls/38014
2020-11-23T06:47:28Z
2020-12-07T13:38:28Z
2020-12-07T13:38:28Z
2020-12-08T02:59:26Z
REF: Implement isin on DTA instead of DTI
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index a3abfaa48500c..b79905796f7cd 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -433,10 +433,8 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray: return cast("Categorical", comps).isin(values) if needs_i8_conversion(comps): - # Dispatch to DatetimeLikeIndexMixin.isin - from pandas import Index - - return Index(comps).isin(values) + # Dispatch to DatetimeLikeArrayMixin.isin + return array(comps).isin(values) comps, dtype = _ensure_data(comps) values, _ = _ensure_data(values, dtype=dtype) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 3b419f8d1da2a..c482eae35b313 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -62,7 +62,7 @@ from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna from pandas.core import nanops, ops -from pandas.core.algorithms import checked_add_with_arr, unique1d, value_counts +from pandas.core.algorithms import checked_add_with_arr, isin, unique1d, value_counts from pandas.core.arraylike import OpsMixin from pandas.core.arrays._mixins import NDArrayBackedExtensionArray import pandas.core.common as com @@ -697,6 +697,59 @@ def map(self, mapper): return Index(self).map(mapper).array + def isin(self, values) -> np.ndarray: + """ + Compute boolean array of whether each value is found in the + passed set of values. + + Parameters + ---------- + values : set or sequence of values + + Returns + ------- + ndarray[bool] + """ + if not hasattr(values, "dtype"): + values = np.asarray(values) + + if values.dtype.kind in ["f", "i", "u", "c"]: + # TODO: de-duplicate with equals, validate_comparison_value + return np.zeros(self.shape, dtype=bool) + + if not isinstance(values, type(self)): + inferrable = [ + "timedelta", + "timedelta64", + "datetime", + "datetime64", + "date", + "period", + ] + if values.dtype == object: + inferred = lib.infer_dtype(values, skipna=False) + if inferred not in inferrable: + if inferred == "string": + pass + + elif "mixed" in inferred: + return isin(self.astype(object), values) + else: + return np.zeros(self.shape, dtype=bool) + + try: + values = type(self)._from_sequence(values) + except ValueError: + return isin(self.astype(object), values) + + try: + self._check_compatible_with(values) + except (TypeError, ValueError): + # Includes tzawareness mismatch and IncompatibleFrequencyError + return np.zeros(self.shape, dtype=bool) + + return isin(self.asi8, values.asi8) + # ------------------------------------------------------------------ # Null Handling diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 7b72196c3c2f3..b5900ead246f3 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -5145,7 +5145,7 @@ def isin(self, values, level=None): """ if level is not None: self._validate_index_level(level) - return algos.isin(self, values) + return algos.isin(self._values, values) def _get_string_slice(self, key: str_t): # this is for partial string indexing, diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index ce5d62aec4f9f..d0f818410f96a 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -24,7 +24,6 @@ from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ABCIndex, ABCSeries -from pandas.core import algorithms from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray from 
pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin import pandas.core.common as com @@ -500,58 +499,6 @@ def _partial_date_slice( __truediv__ = make_wrapped_arith_op("__truediv__") __rtruediv__ = make_wrapped_arith_op("__rtruediv__") - def isin(self, values, level=None): - """ - Compute boolean array of whether each index value is found in the - passed set of values. - - Parameters - ---------- - values : set or sequence of values - - Returns - ------- - is_contained : ndarray (boolean dtype) - """ - if level is not None: - self._validate_index_level(level) - - if not hasattr(values, "dtype"): - values = np.asarray(values) - - if values.dtype.kind in ["f", "i", "u", "c"]: - # TODO: de-duplicate with equals, validate_comparison_value - return np.zeros(self.shape, dtype=bool) - - if not isinstance(values, type(self)): - inferrable = [ - "timedelta", - "timedelta64", - "datetime", - "datetime64", - "date", - "period", - ] - if values.dtype == object: - inferred = lib.infer_dtype(values, skipna=False) - if inferred not in inferrable: - if "mixed" in inferred: - return self.astype(object).isin(values) - return np.zeros(self.shape, dtype=bool) - - try: - values = type(self)(values) - except ValueError: - return self.astype(object).isin(values) - - try: - self._data._check_compatible_with(values) - except (TypeError, ValueError): - # Includes tzawareness mismatch and IncompatibleFrequencyError - return np.zeros(self.shape, dtype=bool) - - return algorithms.isin(self.asi8, values.asi8) - def shift(self, periods=1, freq=None): """ Shift index by desired number of time frequency increments. diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 24aaf5885fe0e..7778b1e264cd8 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -27,7 +27,6 @@ from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna -from pandas.core import algorithms import pandas.core.common as com from pandas.core.indexes.base import Index, maybe_extract_name @@ -434,12 +433,6 @@ def __contains__(self, other: Any) -> bool: def is_unique(self) -> bool: return super().is_unique and self._nan_idxs.size < 2 - @doc(Index.isin) - def isin(self, values, level=None): - if level is not None: - self._validate_index_level(level) - return algorithms.isin(np.array(self), values) - def _can_union_without_object_cast(self, other) -> bool: # See GH#26778, further casting may occur in NumericIndex._union return is_numeric_dtype(other.dtype) diff --git a/pandas/core/series.py b/pandas/core/series.py index d59e72a04209c..4c3ad38c8a922 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4691,7 +4691,7 @@ def isin(self, values) -> "Series": 5 False Name: animal, dtype: bool """ - result = algorithms.isin(self, values) + result = algorithms.isin(self._values, values) return self._constructor(result, index=self.index).__finalize__( self, method="isin" )
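User-facing behavior is unchanged by moving `isin` to the array; a small sketch of what the dispatched method does (illustrative):

```python
import numpy as np
import pandas as pd

dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(dti)

print(ser.isin([dti[0], dti[2]]).tolist())      # [True, False, True]
# Numeric values can never match datetimes, so the short-circuit in
# DatetimeLikeArrayMixin.isin returns all-False:
print(ser.isin(np.array([1.0, 2.0])).tolist())  # [False, False, False]
```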
https://api.github.com/repos/pandas-dev/pandas/pulls/38012
2020-11-23T03:00:47Z
2020-11-24T02:46:26Z
2020-11-24T02:46:26Z
2020-11-24T03:07:29Z
TST/REF: collect Index.equals tests
diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py index 1a05dbe2bb230..b2b3f76824b9e 100644 --- a/pandas/tests/indexes/categorical/test_category.py +++ b/pandas/tests/indexes/categorical/test_category.py @@ -381,77 +381,6 @@ def test_ensure_copied_data(self, index): result = CategoricalIndex(index.values, copy=False) assert _base(index.values) is _base(result.values) - def test_equals_categorical(self): - ci1 = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True) - ci2 = CategoricalIndex(["a", "b"], categories=["a", "b", "c"], ordered=True) - - assert ci1.equals(ci1) - assert not ci1.equals(ci2) - assert ci1.equals(ci1.astype(object)) - assert ci1.astype(object).equals(ci1) - - assert (ci1 == ci1).all() - assert not (ci1 != ci1).all() - assert not (ci1 > ci1).all() - assert not (ci1 < ci1).all() - assert (ci1 <= ci1).all() - assert (ci1 >= ci1).all() - - assert not (ci1 == 1).all() - assert (ci1 == Index(["a", "b"])).all() - assert (ci1 == ci1.values).all() - - # invalid comparisons - with pytest.raises(ValueError, match="Lengths must match"): - ci1 == Index(["a", "b", "c"]) - - msg = "Categoricals can only be compared if 'categories' are the same" - with pytest.raises(TypeError, match=msg): - ci1 == ci2 - with pytest.raises(TypeError, match=msg): - ci1 == Categorical(ci1.values, ordered=False) - with pytest.raises(TypeError, match=msg): - ci1 == Categorical(ci1.values, categories=list("abc")) - - # tests - # make sure that we are testing for category inclusion properly - ci = CategoricalIndex(list("aabca"), categories=["c", "a", "b"]) - assert not ci.equals(list("aabca")) - # Same categories, but different order - # Unordered - assert ci.equals(CategoricalIndex(list("aabca"))) - # Ordered - assert not ci.equals(CategoricalIndex(list("aabca"), ordered=True)) - assert ci.equals(ci.copy()) - - ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"]) - assert not ci.equals(list("aabca")) - assert not ci.equals(CategoricalIndex(list("aabca"))) - assert ci.equals(ci.copy()) - - ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"]) - assert not ci.equals(list("aabca") + [np.nan]) - assert ci.equals(CategoricalIndex(list("aabca") + [np.nan])) - assert not ci.equals(CategoricalIndex(list("aabca") + [np.nan], ordered=True)) - assert ci.equals(ci.copy()) - - def test_equals_categorical_unordered(self): - # https://github.com/pandas-dev/pandas/issues/16603 - a = CategoricalIndex(["A"], categories=["A", "B"]) - b = CategoricalIndex(["A"], categories=["B", "A"]) - c = CategoricalIndex(["C"], categories=["B", "A"]) - assert a.equals(b) - assert not a.equals(c) - assert not b.equals(c) - - def test_equals_non_category(self): - # GH#37667 Case where other contains a value not among ci's - # categories ("D") and also contains np.nan - ci = CategoricalIndex(["A", "B", np.nan, np.nan]) - other = Index(["A", "B", "D", np.nan]) - - assert not ci.equals(other) - def test_frame_repr(self): df = pd.DataFrame({"A": [1, 2, 3]}, index=CategoricalIndex(["a", "b", "c"])) result = repr(df) diff --git a/pandas/tests/indexes/categorical/test_equals.py b/pandas/tests/indexes/categorical/test_equals.py new file mode 100644 index 0000000000000..3f9a58c6a06cd --- /dev/null +++ b/pandas/tests/indexes/categorical/test_equals.py @@ -0,0 +1,77 @@ +import numpy as np +import pytest + +from pandas import Categorical, CategoricalIndex, Index + + +class TestEquals: + def test_equals_categorical(self): + ci1 = 
CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True) + ci2 = CategoricalIndex(["a", "b"], categories=["a", "b", "c"], ordered=True) + + assert ci1.equals(ci1) + assert not ci1.equals(ci2) + assert ci1.equals(ci1.astype(object)) + assert ci1.astype(object).equals(ci1) + + assert (ci1 == ci1).all() + assert not (ci1 != ci1).all() + assert not (ci1 > ci1).all() + assert not (ci1 < ci1).all() + assert (ci1 <= ci1).all() + assert (ci1 >= ci1).all() + + assert not (ci1 == 1).all() + assert (ci1 == Index(["a", "b"])).all() + assert (ci1 == ci1.values).all() + + # invalid comparisons + with pytest.raises(ValueError, match="Lengths must match"): + ci1 == Index(["a", "b", "c"]) + + msg = "Categoricals can only be compared if 'categories' are the same" + with pytest.raises(TypeError, match=msg): + ci1 == ci2 + with pytest.raises(TypeError, match=msg): + ci1 == Categorical(ci1.values, ordered=False) + with pytest.raises(TypeError, match=msg): + ci1 == Categorical(ci1.values, categories=list("abc")) + + # tests + # make sure that we are testing for category inclusion properly + ci = CategoricalIndex(list("aabca"), categories=["c", "a", "b"]) + assert not ci.equals(list("aabca")) + # Same categories, but different order + # Unordered + assert ci.equals(CategoricalIndex(list("aabca"))) + # Ordered + assert not ci.equals(CategoricalIndex(list("aabca"), ordered=True)) + assert ci.equals(ci.copy()) + + ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"]) + assert not ci.equals(list("aabca")) + assert not ci.equals(CategoricalIndex(list("aabca"))) + assert ci.equals(ci.copy()) + + ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"]) + assert not ci.equals(list("aabca") + [np.nan]) + assert ci.equals(CategoricalIndex(list("aabca") + [np.nan])) + assert not ci.equals(CategoricalIndex(list("aabca") + [np.nan], ordered=True)) + assert ci.equals(ci.copy()) + + def test_equals_categorical_unordered(self): + # https://github.com/pandas-dev/pandas/issues/16603 + a = CategoricalIndex(["A"], categories=["A", "B"]) + b = CategoricalIndex(["A"], categories=["B", "A"]) + c = CategoricalIndex(["C"], categories=["B", "A"]) + assert a.equals(b) + assert not a.equals(c) + assert not b.equals(c) + + def test_equals_non_category(self): + # GH#37667 Case where other contains a value not among ci's + # categories ("D") and also contains np.nan + ci = CategoricalIndex(["A", "B", np.nan, np.nan]) + other = Index(["A", "B", "D", np.nan]) + + assert not ci.equals(other) diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py index 6f078237e3a97..7ce8640d09777 100644 --- a/pandas/tests/indexes/datetimelike.py +++ b/pandas/tests/indexes/datetimelike.py @@ -1,4 +1,5 @@ """ generic datetimelike tests """ + import numpy as np import pytest @@ -109,27 +110,6 @@ def test_getitem_preserves_freq(self): result = index[:] assert result.freq == index.freq - def test_not_equals_numeric(self): - index = self.create_index() - - assert not index.equals(pd.Index(index.asi8)) - assert not index.equals(pd.Index(index.asi8.astype("u8"))) - assert not index.equals(pd.Index(index.asi8).astype("f8")) - - def test_equals(self): - index = self.create_index() - - assert index.equals(index.astype(object)) - assert index.equals(pd.CategoricalIndex(index)) - assert index.equals(pd.CategoricalIndex(index.astype(object))) - - def test_not_equals_strings(self): - index = self.create_index() - - other = pd.Index([str(x) for x in index], dtype=object) - assert not 
index.equals(other) - assert not index.equals(pd.CategoricalIndex(other)) - def test_where_cast_str(self): index = self.create_index() diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index b801f750718ac..b35aa28ffc40b 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -177,12 +177,6 @@ def test_misc_coverage(self): result = rng.groupby(rng.day) assert isinstance(list(result.values())[0][0], Timestamp) - idx = DatetimeIndex(["2000-01-03", "2000-01-01", "2000-01-02"]) - assert not idx.equals(list(idx)) - - non_datetime = Index(list("abc")) - assert not idx.equals(list(non_datetime)) - def test_string_index_series_name_converted(self): # #1644 df = DataFrame(np.random.randn(10, 4), index=date_range("1/1/2000", periods=10)) diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 0359ee17f87c5..cbbe3aca9ccbe 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -325,47 +325,6 @@ def test_nat(self, tz_naive_fixture): assert idx.hasnans is True tm.assert_numpy_array_equal(idx._nan_idxs, np.array([1], dtype=np.intp)) - def test_equals(self): - # GH 13107 - idx = DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"]) - assert idx.equals(idx) - assert idx.equals(idx.copy()) - assert idx.equals(idx.astype(object)) - assert idx.astype(object).equals(idx) - assert idx.astype(object).equals(idx.astype(object)) - assert not idx.equals(list(idx)) - assert not idx.equals(Series(idx)) - - idx2 = DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"], tz="US/Pacific") - assert not idx.equals(idx2) - assert not idx.equals(idx2.copy()) - assert not idx.equals(idx2.astype(object)) - assert not idx.astype(object).equals(idx2) - assert not idx.equals(list(idx2)) - assert not idx.equals(Series(idx2)) - - # same internal, different tz - idx3 = DatetimeIndex(idx.asi8, tz="US/Pacific") - tm.assert_numpy_array_equal(idx.asi8, idx3.asi8) - assert not idx.equals(idx3) - assert not idx.equals(idx3.copy()) - assert not idx.equals(idx3.astype(object)) - assert not idx.astype(object).equals(idx3) - assert not idx.equals(list(idx3)) - assert not idx.equals(Series(idx3)) - - # check that we do not raise when comparing with OutOfBounds objects - oob = Index([datetime(2500, 1, 1)] * 3, dtype=object) - assert not idx.equals(oob) - assert not idx2.equals(oob) - assert not idx3.equals(oob) - - # check that we do not raise when comparing with OutOfBounds dt64 - oob2 = oob.map(np.datetime64) - assert not idx.equals(oob2) - assert not idx2.equals(oob2) - assert not idx3.equals(oob2) - @pytest.mark.parametrize("values", [["20180101", "20180103", "20180105"], []]) @pytest.mark.parametrize("freq", ["2D", Day(2), "2B", BDay(2), "48H", Hour(48)]) @pytest.mark.parametrize("tz", [None, "US/Eastern"]) @@ -429,9 +388,6 @@ def test_copy(self): repr(cp) tm.assert_index_equal(cp, self.rng) - def test_equals(self): - assert not self.rng.equals(list(self.rng)) - def test_identical(self): t1 = self.rng.copy() t2 = self.rng.copy() @@ -465,6 +421,3 @@ def test_copy(self): cp = self.rng.copy() repr(cp) tm.assert_index_equal(cp, self.rng) - - def test_equals(self): - assert not self.rng.equals(list(self.rng)) diff --git a/pandas/tests/indexes/interval/test_base.py b/pandas/tests/indexes/interval/test_base.py index 343c3d2e145f6..cc782a6e3bb81 100644 --- a/pandas/tests/indexes/interval/test_base.py +++ 
b/pandas/tests/indexes/interval/test_base.py @@ -21,34 +21,6 @@ def index(self): def create_index(self, closed="right"): return IntervalIndex.from_breaks(range(11), closed=closed) - def test_equals(self, closed): - expected = IntervalIndex.from_breaks(np.arange(5), closed=closed) - assert expected.equals(expected) - assert expected.equals(expected.copy()) - - assert not expected.equals(expected.astype(object)) - assert not expected.equals(np.array(expected)) - assert not expected.equals(list(expected)) - - assert not expected.equals([1, 2]) - assert not expected.equals(np.array([1, 2])) - assert not expected.equals(date_range("20130101", periods=2)) - - expected_name1 = IntervalIndex.from_breaks( - np.arange(5), closed=closed, name="foo" - ) - expected_name2 = IntervalIndex.from_breaks( - np.arange(5), closed=closed, name="bar" - ) - assert expected.equals(expected_name1) - assert expected_name1.equals(expected_name2) - - for other_closed in {"left", "right", "both", "neither"} - {closed}: - expected_other_closed = IntervalIndex.from_breaks( - np.arange(5), closed=other_closed - ) - assert not expected.equals(expected_other_closed) - def test_repr_max_seq_item_setting(self): # override base test: not a valid repr as we use interval notation pass diff --git a/pandas/tests/indexes/interval/test_equals.py b/pandas/tests/indexes/interval/test_equals.py new file mode 100644 index 0000000000000..e53a836366432 --- /dev/null +++ b/pandas/tests/indexes/interval/test_equals.py @@ -0,0 +1,33 @@ +import numpy as np + +from pandas import IntervalIndex, date_range + + +class TestEquals: + def test_equals(self, closed): + expected = IntervalIndex.from_breaks(np.arange(5), closed=closed) + assert expected.equals(expected) + assert expected.equals(expected.copy()) + + assert not expected.equals(expected.astype(object)) + assert not expected.equals(np.array(expected)) + assert not expected.equals(list(expected)) + + assert not expected.equals([1, 2]) + assert not expected.equals(np.array([1, 2])) + assert not expected.equals(date_range("20130101", periods=2)) + + expected_name1 = IntervalIndex.from_breaks( + np.arange(5), closed=closed, name="foo" + ) + expected_name2 = IntervalIndex.from_breaks( + np.arange(5), closed=closed, name="bar" + ) + assert expected.equals(expected_name1) + assert expected_name1.equals(expected_name2) + + for other_closed in {"left", "right", "both", "neither"} - {closed}: + expected_other_closed = IntervalIndex.from_breaks( + np.arange(5), closed=other_closed + ) + assert not expected.equals(expected_other_closed) diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py index 8e7cb7d86edf5..645019f1ac063 100644 --- a/pandas/tests/indexes/period/test_ops.py +++ b/pandas/tests/indexes/period/test_ops.py @@ -287,38 +287,6 @@ def test_nat(self): assert idx.hasnans is True tm.assert_numpy_array_equal(idx._nan_idxs, np.array([1], dtype=np.intp)) - @pytest.mark.parametrize("freq", ["D", "M"]) - def test_equals(self, freq): - # GH#13107 - idx = PeriodIndex(["2011-01-01", "2011-01-02", "NaT"], freq=freq) - assert idx.equals(idx) - assert idx.equals(idx.copy()) - assert idx.equals(idx.astype(object)) - assert idx.astype(object).equals(idx) - assert idx.astype(object).equals(idx.astype(object)) - assert not idx.equals(list(idx)) - assert not idx.equals(Series(idx)) - - idx2 = PeriodIndex(["2011-01-01", "2011-01-02", "NaT"], freq="H") - assert not idx.equals(idx2) - assert not idx.equals(idx2.copy()) - assert not idx.equals(idx2.astype(object)) - assert 
not idx.astype(object).equals(idx2) - assert not idx.equals(list(idx2)) - assert not idx.equals(Series(idx2)) - - # same internal, different tz - idx3 = PeriodIndex._simple_new( - idx._values._simple_new(idx._values.asi8, freq="H") - ) - tm.assert_numpy_array_equal(idx.asi8, idx3.asi8) - assert not idx.equals(idx3) - assert not idx.equals(idx3.copy()) - assert not idx.equals(idx3.astype(object)) - assert not idx.astype(object).equals(idx3) - assert not idx.equals(list(idx3)) - assert not idx.equals(Series(idx3)) - def test_freq_setter_deprecated(self): # GH 20678 idx = pd.period_range("2018Q1", periods=4, freq="Q") diff --git a/pandas/tests/indexes/test_datetimelike.py b/pandas/tests/indexes/test_datetimelike.py new file mode 100644 index 0000000000000..55a90f982a971 --- /dev/null +++ b/pandas/tests/indexes/test_datetimelike.py @@ -0,0 +1,174 @@ +""" +Tests shared for DatetimeIndex/TimedeltaIndex/PeriodIndex +""" +from datetime import datetime, timedelta + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + CategoricalIndex, + DatetimeIndex, + Index, + PeriodIndex, + TimedeltaIndex, + date_range, + period_range, +) +import pandas._testing as tm + + +class EqualsTests: + def test_not_equals_numeric(self, index): + + assert not index.equals(Index(index.asi8)) + assert not index.equals(Index(index.asi8.astype("u8"))) + assert not index.equals(Index(index.asi8).astype("f8")) + + def test_equals(self, index): + assert index.equals(index) + assert index.equals(index.astype(object)) + assert index.equals(CategoricalIndex(index)) + assert index.equals(CategoricalIndex(index.astype(object))) + + def test_not_equals_non_arraylike(self, index): + assert not index.equals(list(index)) + + def test_not_equals_strings(self, index): + + other = Index([str(x) for x in index], dtype=object) + assert not index.equals(other) + assert not index.equals(CategoricalIndex(other)) + + def test_not_equals_misc_strs(self, index): + other = Index(list("abc")) + assert not index.equals(other) + + +class TestPeriodIndexEquals(EqualsTests): + @pytest.fixture + def index(self): + return period_range("2013-01-01", periods=5, freq="D") + + # TODO: de-duplicate with other test_equals2 methods + @pytest.mark.parametrize("freq", ["D", "M"]) + def test_equals2(self, freq): + # GH#13107 + idx = PeriodIndex(["2011-01-01", "2011-01-02", "NaT"], freq=freq) + assert idx.equals(idx) + assert idx.equals(idx.copy()) + assert idx.equals(idx.astype(object)) + assert idx.astype(object).equals(idx) + assert idx.astype(object).equals(idx.astype(object)) + assert not idx.equals(list(idx)) + assert not idx.equals(pd.Series(idx)) + + idx2 = PeriodIndex(["2011-01-01", "2011-01-02", "NaT"], freq="H") + assert not idx.equals(idx2) + assert not idx.equals(idx2.copy()) + assert not idx.equals(idx2.astype(object)) + assert not idx.astype(object).equals(idx2) + assert not idx.equals(list(idx2)) + assert not idx.equals(pd.Series(idx2)) + + # same internal, different tz + idx3 = PeriodIndex._simple_new( + idx._values._simple_new(idx._values.asi8, freq="H") + ) + tm.assert_numpy_array_equal(idx.asi8, idx3.asi8) + assert not idx.equals(idx3) + assert not idx.equals(idx3.copy()) + assert not idx.equals(idx3.astype(object)) + assert not idx.astype(object).equals(idx3) + assert not idx.equals(list(idx3)) + assert not idx.equals(pd.Series(idx3)) + + +class TestDatetimeIndexEquals(EqualsTests): + @pytest.fixture + def index(self): + return date_range("2013-01-01", periods=5) + + def test_equals2(self): + # GH#13107 + idx = 
DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"]) + assert idx.equals(idx) + assert idx.equals(idx.copy()) + assert idx.equals(idx.astype(object)) + assert idx.astype(object).equals(idx) + assert idx.astype(object).equals(idx.astype(object)) + assert not idx.equals(list(idx)) + assert not idx.equals(pd.Series(idx)) + + idx2 = DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"], tz="US/Pacific") + assert not idx.equals(idx2) + assert not idx.equals(idx2.copy()) + assert not idx.equals(idx2.astype(object)) + assert not idx.astype(object).equals(idx2) + assert not idx.equals(list(idx2)) + assert not idx.equals(pd.Series(idx2)) + + # same internal, different tz + idx3 = DatetimeIndex(idx.asi8, tz="US/Pacific") + tm.assert_numpy_array_equal(idx.asi8, idx3.asi8) + assert not idx.equals(idx3) + assert not idx.equals(idx3.copy()) + assert not idx.equals(idx3.astype(object)) + assert not idx.astype(object).equals(idx3) + assert not idx.equals(list(idx3)) + assert not idx.equals(pd.Series(idx3)) + + # check that we do not raise when comparing with OutOfBounds objects + oob = Index([datetime(2500, 1, 1)] * 3, dtype=object) + assert not idx.equals(oob) + assert not idx2.equals(oob) + assert not idx3.equals(oob) + + # check that we do not raise when comparing with OutOfBounds dt64 + oob2 = oob.map(np.datetime64) + assert not idx.equals(oob2) + assert not idx2.equals(oob2) + assert not idx3.equals(oob2) + + @pytest.mark.parametrize("freq", ["B", "C"]) + def test_not_equals_bday(self, freq): + rng = date_range("2009-01-01", "2010-01-01", freq=freq) + assert not rng.equals(list(rng)) + + +class TestTimedeltaIndexEquals(EqualsTests): + @pytest.fixture + def index(self): + return tm.makeTimedeltaIndex(10) + + def test_equals2(self): + # GH#13107 + idx = TimedeltaIndex(["1 days", "2 days", "NaT"]) + assert idx.equals(idx) + assert idx.equals(idx.copy()) + assert idx.equals(idx.astype(object)) + assert idx.astype(object).equals(idx) + assert idx.astype(object).equals(idx.astype(object)) + assert not idx.equals(list(idx)) + assert not idx.equals(pd.Series(idx)) + + idx2 = TimedeltaIndex(["2 days", "1 days", "NaT"]) + assert not idx.equals(idx2) + assert not idx.equals(idx2.copy()) + assert not idx.equals(idx2.astype(object)) + assert not idx.astype(object).equals(idx2) + assert not idx.astype(object).equals(idx2.astype(object)) + assert not idx.equals(list(idx2)) + assert not idx.equals(pd.Series(idx2)) + + # Check that we dont raise OverflowError on comparisons outside the + # implementation range + oob = Index([timedelta(days=10 ** 6)] * 3, dtype=object) + assert not idx.equals(oob) + assert not idx2.equals(oob) + + # FIXME: oob.apply(np.timedelta64) incorrectly overflows + oob2 = Index([np.timedelta64(x) for x in oob], dtype=object) + assert not idx.equals(oob2) + assert not idx2.equals(oob2) diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index 15b94eafe2f27..52097dbe610ef 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -1,5 +1,3 @@ -from datetime import timedelta - import numpy as np import pytest @@ -228,37 +226,6 @@ def test_nat(self): assert idx.hasnans is True tm.assert_numpy_array_equal(idx._nan_idxs, np.array([1], dtype=np.intp)) - def test_equals(self): - # GH 13107 - idx = TimedeltaIndex(["1 days", "2 days", "NaT"]) - assert idx.equals(idx) - assert idx.equals(idx.copy()) - assert idx.equals(idx.astype(object)) - assert idx.astype(object).equals(idx) - assert 
idx.astype(object).equals(idx.astype(object)) - assert not idx.equals(list(idx)) - assert not idx.equals(Series(idx)) - - idx2 = TimedeltaIndex(["2 days", "1 days", "NaT"]) - assert not idx.equals(idx2) - assert not idx.equals(idx2.copy()) - assert not idx.equals(idx2.astype(object)) - assert not idx.astype(object).equals(idx2) - assert not idx.astype(object).equals(idx2.astype(object)) - assert not idx.equals(list(idx2)) - assert not idx.equals(Series(idx2)) - - # Check that we dont raise OverflowError on comparisons outside the - # implementation range - oob = pd.Index([timedelta(days=10 ** 6)] * 3, dtype=object) - assert not idx.equals(oob) - assert not idx2.equals(oob) - - # FIXME: oob.apply(np.timedelta64) incorrectly overflows - oob2 = pd.Index([np.timedelta64(x) for x in oob], dtype=object) - assert not idx.equals(oob2) - assert not idx2.equals(oob2) - @pytest.mark.parametrize("values", [["0 days", "2 days", "4 days"], []]) @pytest.mark.parametrize("freq", ["2D", Day(2), "48H", Hour(48)]) def test_freq_setter(self, values, freq): diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py index 4a1749ff734c1..774370ed866da 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta.py @@ -118,12 +118,6 @@ def test_misc_coverage(self): result = rng.groupby(rng.days) assert isinstance(list(result.values())[0][0], Timedelta) - idx = TimedeltaIndex(["3d", "1d", "2d"]) - assert not idx.equals(list(idx)) - - non_td = Index(list("abc")) - assert not idx.equals(list(non_td)) - def test_map(self): # test_map_dictlike generally tests
https://api.github.com/repos/pandas-dev/pandas/pulls/38011
2020-11-22T23:47:34Z
2020-11-24T13:25:29Z
2020-11-24T13:25:29Z
2020-11-24T15:24:33Z
BUG: loc returning wrong elements for non-monotonic DatetimeIndex
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 393866b92771b..c5969748c3d53 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -238,6 +238,7 @@ Indexing ^^^^^^^^ - Bug in :meth:`CategoricalIndex.get_indexer` failing to raise ``InvalidIndexError`` when non-unique (:issue:`38372`) - Bug in inserting many new columns into a :class:`DataFrame` causing incorrect subsequent indexing behavior (:issue:`38380`) +- Bug in :meth:`DataFrame.loc`, :meth:`Series.loc`, :meth:`DataFrame.__getitem__` and :meth:`Series.__getitem__` returning incorrect elements for non-monotonic :class:`DatetimeIndex` for string slices (:issue:`33146`) - Bug in :meth:`DataFrame.iloc.__setitem__` and :meth:`DataFrame.loc.__setitem__` with mixed dtypes when setting with a dictionary value (:issue:`38335`) - Bug in :meth:`DataFrame.loc` dropping levels of :class:`MultiIndex` when :class:`DataFrame` used as input has only one row (:issue:`10521`) - diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index d176b6a5d8e6d..4b39214bc4f96 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -776,42 +776,44 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None): if isinstance(end, date) and not isinstance(end, datetime): end = datetime.combine(end, time(0, 0)) - try: + def check_str_or_none(point): + return point is not None and not isinstance(point, str) + + # GH#33146 if start and end are combinations of str and None and Index is not + # monotonic, we can not use Index.slice_indexer because it does not honor the + # actual elements, is only searching for start and end + if ( + check_str_or_none(start) + or check_str_or_none(end) + or self.is_monotonic_increasing + ): return Index.slice_indexer(self, start, end, step, kind=kind) - except KeyError: - # For historical reasons DatetimeIndex by default supports - # value-based partial (aka string) slices on non-monotonic arrays, - # let's try that. 
- if (start is None or isinstance(start, str)) and ( - end is None or isinstance(end, str) - ): - mask = np.array(True) - deprecation_mask = np.array(True) - if start is not None: - start_casted = self._maybe_cast_slice_bound(start, "left", kind) - mask = start_casted <= self - deprecation_mask = start_casted == self - - if end is not None: - end_casted = self._maybe_cast_slice_bound(end, "right", kind) - mask = (self <= end_casted) & mask - deprecation_mask = (end_casted == self) | deprecation_mask - - if not deprecation_mask.any(): - warnings.warn( - "Value based partial slicing on non-monotonic DatetimeIndexes " - "with non-existing keys is deprecated and will raise a " - "KeyError in a future Version.", - FutureWarning, - stacklevel=5, - ) - indexer = mask.nonzero()[0][::step] - if len(indexer) == len(self): - return slice(None) - else: - return indexer - else: - raise + + mask = np.array(True) + deprecation_mask = np.array(True) + if start is not None: + start_casted = self._maybe_cast_slice_bound(start, "left", kind) + mask = start_casted <= self + deprecation_mask = start_casted == self + + if end is not None: + end_casted = self._maybe_cast_slice_bound(end, "right", kind) + mask = (self <= end_casted) & mask + deprecation_mask = (end_casted == self) | deprecation_mask + + if not deprecation_mask.any(): + warnings.warn( + "Value based partial slicing on non-monotonic DatetimeIndexes " + "with non-existing keys is deprecated and will raise a " + "KeyError in a future Version.", + FutureWarning, + stacklevel=5, + ) + indexer = mask.nonzero()[0][::step] + if len(indexer) == len(self): + return slice(None) + else: + return indexer # -------------------------------------------------------------------- diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 6c5cd0f335faa..7c73917e44b22 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -15,6 +15,7 @@ Categorical, CategoricalIndex, DataFrame, + DatetimeIndex, Index, MultiIndex, Series, @@ -1556,6 +1557,42 @@ def test_loc_getitem_str_timedeltaindex(self): sliced = df.loc["0 days"] tm.assert_series_equal(sliced, expected) + @pytest.mark.parametrize("indexer_end", [None, "2020-01-02 23:59:59.999999999"]) + def test_loc_getitem_partial_slice_non_monotonicity( + self, tz_aware_fixture, indexer_end, frame_or_series + ): + # GH#33146 + obj = frame_or_series( + [1] * 5, + index=DatetimeIndex( + [ + Timestamp("2019-12-30"), + Timestamp("2020-01-01"), + Timestamp("2019-12-25"), + Timestamp("2020-01-02 23:59:59.999999999"), + Timestamp("2019-12-19"), + ], + tz=tz_aware_fixture, + ), + ) + expected = frame_or_series( + [1] * 2, + index=DatetimeIndex( + [ + Timestamp("2020-01-01"), + Timestamp("2020-01-02 23:59:59.999999999"), + ], + tz=tz_aware_fixture, + ), + ) + indexer = slice("2020-01-01", indexer_end) + + result = obj[indexer] + tm.assert_equal(result, expected) + + result = obj.loc[indexer] + tm.assert_equal(result, expected) + class TestLabelSlicing: def test_loc_getitem_label_slice_across_dst(self): @@ -1652,7 +1689,7 @@ def test_loc_getitem_slice_columns_mixed_dtype(self): # GH: 20975 df = DataFrame({"test": 1, 1: 2, 2: 3}, index=[0]) expected = DataFrame( - data=[[2, 3]], index=[0], columns=pd.Index([1, 2], dtype=object) + data=[[2, 3]], index=[0], columns=Index([1, 2], dtype=object) ) tm.assert_frame_equal(df.loc[:, 1:], expected) @@ -1858,7 +1895,7 @@ def test_loc_set_dataframe_multiindex(): def test_loc_mixed_int_float(): # GH#19456 - ser = Series(range(2), 
pd.Index([1, 2.0], dtype=object)) + ser = Series(range(2), Index([1, 2.0], dtype=object)) result = ser.loc[1] assert result == 0
- [x] closes #33146 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry That one was tricky. When only one existing key is given, or both string bounds exist in the index, ``Index.slice_indexer`` does not raise a KeyError but instead selects a lot of erroneous values.
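A repro sketch of the case the fix targets (adapted from the added test; illustrative):

```python
import pandas as pd

ser = pd.Series(
    range(5),
    index=pd.DatetimeIndex(
        ["2019-12-30", "2020-01-01", "2019-12-25", "2020-01-02", "2019-12-19"]
    ),
)
# With a string/None bound on a non-monotonic index, the mask-based path is
# now taken, so only rows whose timestamps actually fall in the range return:
print(ser.loc["2020-01-01":])  # the 2020-01-01 and 2020-01-02 rows only
```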
https://api.github.com/repos/pandas-dev/pandas/pulls/38010
2020-11-22T23:07:12Z
2021-01-05T02:27:21Z
2021-01-05T02:27:21Z
2021-01-05T08:49:35Z
CLN: fix E741 ambiguous variable #34150
diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py index 21081ee23a773..9cec8a5f7d318 100644 --- a/asv_bench/benchmarks/reshape.py +++ b/asv_bench/benchmarks/reshape.py @@ -103,7 +103,10 @@ def setup(self): nidvars = 20 N = 5000 self.letters = list("ABCD") - yrvars = [l + str(num) for l, num in product(self.letters, range(1, nyrs + 1))] + yrvars = [ + letter + str(num) + for letter, num in product(self.letters, range(1, nyrs + 1)) + ] columns = [str(i) for i in range(nidvars)] + yrvars self.df = DataFrame(np.random.randn(N, nidvars + len(yrvars)), columns=columns) self.df["id"] = self.df.index diff --git a/pandas/_testing.py b/pandas/_testing.py index 87e99e520ab60..da2963e167767 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -749,19 +749,19 @@ def assert_index_equal( """ __tracebackhide__ = True - def _check_types(l, r, obj="Index"): + def _check_types(left, right, obj="Index"): if exact: - assert_class_equal(l, r, exact=exact, obj=obj) + assert_class_equal(left, right, exact=exact, obj=obj) # Skip exact dtype checking when `check_categorical` is False if check_categorical: - assert_attr_equal("dtype", l, r, obj=obj) + assert_attr_equal("dtype", left, right, obj=obj) # allow string-like to have different inferred_types - if l.inferred_type in ("string"): - assert r.inferred_type in ("string") + if left.inferred_type in ("string"): + assert right.inferred_type in ("string") else: - assert_attr_equal("inferred_type", l, r, obj=obj) + assert_attr_equal("inferred_type", left, right, obj=obj) def _get_ilevel_values(index, level): # accept level number only @@ -1147,9 +1147,9 @@ def _raise(left, right, err_msg): ) diff = 0 - for l, r in zip(left, right): + for left_arr, right_arr in zip(left, right): # count up differences - if not array_equivalent(l, r, strict_nan=strict_nan): + if not array_equivalent(left_arr, right_arr, strict_nan=strict_nan): diff += 1 diff = diff * 100.0 / left.size diff --git a/pandas/core/common.py b/pandas/core/common.py index d5c078b817ca0..b9e684a169154 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -42,13 +42,13 @@ class SettingWithCopyWarning(Warning): pass -def flatten(l): +def flatten(line): """ Flatten an arbitrarily nested sequence. Parameters ---------- - l : sequence + line : sequence The non string sequence to flatten Notes @@ -59,11 +59,11 @@ def flatten(l): ------- flattened : generator """ - for el in l: - if iterable_not_string(el): - yield from flatten(el) + for element in line: + if iterable_not_string(element): + yield from flatten(element) else: - yield el + yield element def consensus_name_attr(objs): @@ -282,20 +282,23 @@ def is_null_slice(obj) -> bool: ) -def is_true_slices(l): +def is_true_slices(line): """ - Find non-trivial slices in "l": return a list of booleans with same length. + Find non-trivial slices in "line": return a list of booleans with same length. """ - return [isinstance(k, slice) and not is_null_slice(k) for k in l] + return [isinstance(k, slice) and not is_null_slice(k) for k in line] # TODO: used only once in indexing; belongs elsewhere? -def is_full_slice(obj, l) -> bool: +def is_full_slice(obj, line) -> bool: """ We have a full length slice. 
""" return ( - isinstance(obj, slice) and obj.start == 0 and obj.stop == l and obj.step is None + isinstance(obj, slice) + and obj.start == 0 + and obj.stop == line + and obj.step is None ) diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index a38d9cbad0d64..a9b0498081511 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -21,11 +21,11 @@ from pandas.core.construction import array -def _get_dtype_kinds(l) -> Set[str]: +def _get_dtype_kinds(arrays) -> Set[str]: """ Parameters ---------- - l : list of arrays + arrays : list of arrays Returns ------- @@ -33,7 +33,7 @@ def _get_dtype_kinds(l) -> Set[str]: A set of kinds that exist in this list of arrays. """ typs: Set[str] = set() - for arr in l: + for arr in arrays: # Note: we use dtype.kind checks because they are much more performant # than is_foo_dtype diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 27713b5bde201..6abc629c3612c 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -726,7 +726,7 @@ def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: d.to_string(buf=buf) value = buf.getvalue() - repr_width = max(len(l) for l in value.split("\n")) + repr_width = max(len(line) for line in value.split("\n")) return repr_width < width @@ -5962,13 +5962,16 @@ def _dispatch_frame_op(self, right, func, axis: Optional[int] = None): # maybe_align_as_frame ensures we do not have an ndarray here assert not isinstance(right, np.ndarray) - arrays = [array_op(l, r) for l, r in zip(self._iter_column_arrays(), right)] + arrays = [ + array_op(_left, _right) + for _left, _right in zip(self._iter_column_arrays(), right) + ] elif isinstance(right, Series): assert right.index.equals(self.index) # Handle other cases later right = right._values - arrays = [array_op(l, right) for l in self._iter_column_arrays()] + arrays = [array_op(left, right) for left in self._iter_column_arrays()] else: # Remaining cases have less-obvious dispatch rules diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index ca9612258a890..11dd3598b4864 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1063,7 +1063,7 @@ def set_codes(self, codes, level=None, inplace=None, verify_integrity=True): def _engine(self): # Calculate the number of bits needed to represent labels in each # level, as log2 of their sizes (including -1 for NaN): - sizes = np.ceil(np.log2([len(l) + 1 for l in self.levels])) + sizes = np.ceil(np.log2([len(level) + 1 for level in self.levels])) # Sum bit counts, starting from the _right_.... 
lev_bits = np.cumsum(sizes[::-1])[::-1] @@ -1217,10 +1217,10 @@ def dtype(self) -> np.dtype: def _is_memory_usage_qualified(self) -> bool: """ return a boolean if we need a qualified .info display """ - def f(l): - return "mixed" in l or "string" in l or "unicode" in l + def f(level): + return "mixed" in level or "string" in level or "unicode" in level - return any(f(l) for l in self._inferred_type_levels) + return any(f(level) for level in self._inferred_type_levels) @doc(Index.memory_usage) def memory_usage(self, deep: bool = False) -> int: diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 18ebe14763797..44e165b2d06ee 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -137,7 +137,7 @@ def _indexer_and_to_sort(self): @cache_readonly def sorted_labels(self): indexer, to_sort = self._indexer_and_to_sort - return [l.take(indexer) for l in to_sort] + return [line.take(indexer) for line in to_sort] def _make_sorted_values(self, values: np.ndarray) -> np.ndarray: indexer, _ = self._indexer_and_to_sort diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 082c539d034eb..db34b882a3c35 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -829,7 +829,7 @@ def _get_formatted_column_labels(self, frame: "DataFrame") -> List[List[str]]: dtypes = self.frame.dtypes._values # if we have a Float level, they don't use leading space at all - restrict_formatting = any(l.is_floating for l in columns.levels) + restrict_formatting = any(level.is_floating for level in columns.levels) need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes))) def space_format(x, y): diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index eb29f6c0d0c48..25e8d9acf4690 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -2974,9 +2974,9 @@ def _check_comments(self, lines): if self.comment is None: return lines ret = [] - for l in lines: + for line in lines: rl = [] - for x in l: + for x in line: if not isinstance(x, str) or self.comment not in x: rl.append(x) else: @@ -3003,14 +3003,14 @@ def _remove_empty_lines(self, lines): The same array of lines with the "empty" ones removed. 
""" ret = [] - for l in lines: + for line in lines: # Remove empty lines and lines with only one whitespace value if ( - len(l) > 1 - or len(l) == 1 - and (not isinstance(l[0], str) or l[0].strip()) + len(line) > 1 + or len(line) == 1 + and (not isinstance(line[0], str) or line[0].strip()) ): - ret.append(l) + ret.append(line) return ret def _check_thousands(self, lines): @@ -3023,9 +3023,9 @@ def _check_thousands(self, lines): def _search_replace_num_columns(self, lines, search, replace): ret = [] - for l in lines: + for line in lines: rl = [] - for i, x in enumerate(l): + for i, x in enumerate(line): if ( not isinstance(x, str) or search not in x diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 890195688b1cb..21f7899f24b51 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -4689,7 +4689,7 @@ def read( # remove names for 'level_%d' df.index = df.index.set_names( - [None if self._re_levels.search(l) else l for l in df.index.names] + [None if self._re_levels.search(name) else name for name in df.index.names] ) return df diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py index 3d0e30f8b9234..7122a38db9d0a 100644 --- a/pandas/plotting/_matplotlib/boxplot.py +++ b/pandas/plotting/_matplotlib/boxplot.py @@ -149,8 +149,8 @@ def _make_plot(self): self.maybe_color_bp(bp) self._return_obj = ret - labels = [l for l, _ in self._iter_data()] - labels = [pprint_thing(l) for l in labels] + labels = [left for left, _ in self._iter_data()] + labels = [pprint_thing(left) for left in labels] if not self.use_index: labels = [pprint_thing(key) for key in range(len(labels))] self._set_ticklabels(ax, labels) diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index c01cfb9a8b487..bef2d82706ffc 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -1580,7 +1580,7 @@ def blank_labeler(label, value): # Blank out labels for values of 0 so they don't overlap # with nonzero wedges if labels is not None: - blabels = [blank_labeler(l, value) for l, value in zip(labels, y)] + blabels = [blank_labeler(left, value) for left, value in zip(labels, y)] else: # pandas\plotting\_matplotlib\core.py:1546: error: Incompatible # types in assignment (expression has type "None", variable has diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index bec1f48f5e64a..a5b517bb8a2fc 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -444,8 +444,8 @@ def get_all_lines(ax: "Axes") -> List["Line2D"]: def get_xlim(lines: Iterable["Line2D"]) -> Tuple[float, float]: left, right = np.inf, -np.inf - for l in lines: - x = l.get_xdata(orig=False) + for line in lines: + x = line.get_xdata(orig=False) left = min(np.nanmin(x), left) right = max(np.nanmax(x), right) return left, right diff --git a/pandas/tests/extension/test_interval.py b/pandas/tests/extension/test_interval.py index ec834118ea7a1..1bc06ee4b6397 100644 --- a/pandas/tests/extension/test_interval.py +++ b/pandas/tests/extension/test_interval.py @@ -25,9 +25,9 @@ def make_data(): N = 100 - left = np.random.uniform(size=N).cumsum() - right = left + np.random.uniform(size=N) - return [Interval(l, r) for l, r in zip(left, right)] + left_array = np.random.uniform(size=N).cumsum() + right_array = left_array + np.random.uniform(size=N) + return [Interval(left, right) for left, right in zip(left_array, right_array)] @pytest.fixture diff --git 
a/pandas/tests/frame/methods/test_to_dict.py b/pandas/tests/frame/methods/test_to_dict.py index f8feef7a95eab..db96543dc69b8 100644 --- a/pandas/tests/frame/methods/test_to_dict.py +++ b/pandas/tests/frame/methods/test_to_dict.py @@ -118,8 +118,8 @@ def test_to_dict(self, mapping): ] assert isinstance(recons_data, list) assert len(recons_data) == 3 - for l, r in zip(recons_data, expected_records): - tm.assert_dict_equal(l, r) + for left, right in zip(recons_data, expected_records): + tm.assert_dict_equal(left, right) # GH#10844 recons_data = DataFrame(test_data).to_dict("index") diff --git a/pandas/tests/indexes/interval/test_constructors.py b/pandas/tests/indexes/interval/test_constructors.py index c0ca0b415ba8e..07e4fc937bef8 100644 --- a/pandas/tests/indexes/interval/test_constructors.py +++ b/pandas/tests/indexes/interval/test_constructors.py @@ -339,8 +339,8 @@ def get_kwargs_from_breaks(self, breaks, closed="right"): return {"data": breaks} ivs = [ - Interval(l, r, closed) if notna(l) else l - for l, r in zip(breaks[:-1], breaks[1:]) + Interval(left, right, closed) if notna(left) else left + for left, right in zip(breaks[:-1], breaks[1:]) ] if isinstance(breaks, list): diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index fffaf3830560f..b8734ce8950f2 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -54,7 +54,10 @@ def test_properties(self, closed): assert index.closed == closed - ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))] + ivs = [ + Interval(left, right, closed) + for left, right in zip(range(10), range(1, 11)) + ] expected = np.array(ivs, dtype=object) tm.assert_numpy_array_equal(np.asarray(index), expected) @@ -74,8 +77,8 @@ def test_properties(self, closed): assert index.closed == closed ivs = [ - Interval(l, r, closed) if notna(l) else np.nan - for l, r in zip(expected_left, expected_right) + Interval(left, right, closed) if notna(left) else np.nan + for left, right in zip(expected_left, expected_right) ] expected = np.array(ivs, dtype=object) tm.assert_numpy_array_equal(np.asarray(index), expected) diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py index e8e31aa0cef80..2b7a6ee304891 100644 --- a/pandas/tests/indexes/multi/test_indexing.py +++ b/pandas/tests/indexes/multi/test_indexing.py @@ -817,8 +817,8 @@ def test_pyint_engine(): # integers, rather than uint64. 
N = 5 keys = [ - tuple(l) - for l in [ + tuple(arr) + for arr in [ [0] * 10 * N, [1] * 10 * N, [2] * 10 * N, diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py index 9f86e78fc36c4..5eb3d9e9ec00e 100644 --- a/pandas/tests/indexing/test_floats.py +++ b/pandas/tests/indexing/test_floats.py @@ -234,8 +234,8 @@ def test_scalar_float(self, frame_or_series): tm.makePeriodIndex, ], ) - @pytest.mark.parametrize("l", [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]) - def test_slice_non_numeric(self, index_func, l, frame_or_series): + @pytest.mark.parametrize("idx", [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]) + def test_slice_non_numeric(self, index_func, idx, frame_or_series): # GH 4892 # float_indexers should raise exceptions @@ -251,7 +251,7 @@ def test_slice_non_numeric(self, index_func, l, frame_or_series): "type float" ) with pytest.raises(TypeError, match=msg): - s.iloc[l] + s.iloc[idx] msg = ( "cannot do (slice|positional) indexing " @@ -261,12 +261,12 @@ def test_slice_non_numeric(self, index_func, l, frame_or_series): ) for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]: with pytest.raises(TypeError, match=msg): - idxr(s)[l] + idxr(s)[idx] # setitem msg = "slice indices must be integers or None or have an __index__ method" with pytest.raises(TypeError, match=msg): - s.iloc[l] = 0 + s.iloc[idx] = 0 msg = ( "cannot do (slice|positional) indexing " @@ -276,7 +276,7 @@ def test_slice_non_numeric(self, index_func, l, frame_or_series): ) for idxr in [lambda x: x.loc, lambda x: x]: with pytest.raises(TypeError, match=msg): - idxr(s)[l] = 0 + idxr(s)[idx] = 0 def test_slice_integer(self): @@ -294,9 +294,9 @@ def test_slice_integer(self): s = Series(range(5), index=index) # getitem - for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: + for idx in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: - result = s.loc[l] + result = s.loc[idx] # these are all label indexing # except getitem which is positional @@ -308,9 +308,9 @@ def test_slice_integer(self): self.check(result, s, indexer, False) # getitem out-of-bounds - for l in [slice(-6, 6), slice(-6.0, 6.0)]: + for idx in [slice(-6, 6), slice(-6.0, 6.0)]: - result = s.loc[l] + result = s.loc[idx] # these are all label indexing # except getitem which is positional @@ -331,13 +331,13 @@ def test_slice_integer(self): s[slice(-6.0, 6.0)] # getitem odd floats - for l, res1 in [ + for idx, res1 in [ (slice(2.5, 4), slice(3, 5)), (slice(2, 3.5), slice(2, 4)), (slice(2.5, 3.5), slice(3, 4)), ]: - result = s.loc[l] + result = s.loc[idx] if oob: res = slice(0, 0) else: @@ -352,10 +352,10 @@ def test_slice_integer(self): "type float" ) with pytest.raises(TypeError, match=msg): - s[l] + s[idx] - @pytest.mark.parametrize("l", [slice(2, 4.0), slice(2.0, 4), slice(2.0, 4.0)]) - def test_integer_positional_indexing(self, l): + @pytest.mark.parametrize("idx", [slice(2, 4.0), slice(2.0, 4), slice(2.0, 4.0)]) + def test_integer_positional_indexing(self, idx): """make sure that we are raising on positional indexing w.r.t. 
an integer index """ @@ -372,9 +372,9 @@ def test_integer_positional_indexing(self, l): "type float" ) with pytest.raises(TypeError, match=msg): - s[l] + s[idx] with pytest.raises(TypeError, match=msg): - s.iloc[l] + s.iloc[idx] @pytest.mark.parametrize("index_func", [tm.makeIntIndex, tm.makeRangeIndex]) def test_slice_integer_frame_getitem(self, index_func): @@ -385,9 +385,9 @@ def test_slice_integer_frame_getitem(self, index_func): s = DataFrame(np.random.randn(5, 2), index=index) # getitem - for l in [slice(0.0, 1), slice(0, 1.0), slice(0.0, 1.0)]: + for idx in [slice(0.0, 1), slice(0, 1.0), slice(0.0, 1.0)]: - result = s.loc[l] + result = s.loc[idx] indexer = slice(0, 2) self.check(result, s, indexer, False) @@ -398,12 +398,12 @@ def test_slice_integer_frame_getitem(self, index_func): "type float" ) with pytest.raises(TypeError, match=msg): - s[l] + s[idx] # getitem out-of-bounds - for l in [slice(-10, 10), slice(-10.0, 10.0)]: + for idx in [slice(-10, 10), slice(-10.0, 10.0)]: - result = s.loc[l] + result = s.loc[idx] self.check(result, s, slice(-10, 10), True) # positional indexing @@ -416,13 +416,13 @@ def test_slice_integer_frame_getitem(self, index_func): s[slice(-10.0, 10.0)] # getitem odd floats - for l, res in [ + for idx, res in [ (slice(0.5, 1), slice(1, 2)), (slice(0, 0.5), slice(0, 1)), (slice(0.5, 1.5), slice(1, 2)), ]: - result = s.loc[l] + result = s.loc[idx] self.check(result, s, res, False) # positional indexing @@ -432,11 +432,11 @@ def test_slice_integer_frame_getitem(self, index_func): "type float" ) with pytest.raises(TypeError, match=msg): - s[l] + s[idx] - @pytest.mark.parametrize("l", [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]) + @pytest.mark.parametrize("idx", [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]) @pytest.mark.parametrize("index_func", [tm.makeIntIndex, tm.makeRangeIndex]) - def test_float_slice_getitem_with_integer_index_raises(self, l, index_func): + def test_float_slice_getitem_with_integer_index_raises(self, idx, index_func): # similar to above, but on the getitem dim (of a DataFrame) index = index_func(5) @@ -445,8 +445,8 @@ def test_float_slice_getitem_with_integer_index_raises(self, l, index_func): # setitem sc = s.copy() - sc.loc[l] = 0 - result = sc.loc[l].values.ravel() + sc.loc[idx] = 0 + result = sc.loc[idx].values.ravel() assert (result == 0).all() # positional indexing @@ -456,13 +456,13 @@ def test_float_slice_getitem_with_integer_index_raises(self, l, index_func): "type float" ) with pytest.raises(TypeError, match=msg): - s[l] = 0 + s[idx] = 0 with pytest.raises(TypeError, match=msg): - s[l] + s[idx] - @pytest.mark.parametrize("l", [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]) - def test_slice_float(self, l, frame_or_series): + @pytest.mark.parametrize("idx", [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]) + def test_slice_float(self, idx, frame_or_series): # same as above, but for floats index = Index(np.arange(5.0)) + 0.1 @@ -472,14 +472,14 @@ def test_slice_float(self, l, frame_or_series): for idxr in [lambda x: x.loc, lambda x: x]: # getitem - result = idxr(s)[l] + result = idxr(s)[idx] assert isinstance(result, type(s)) tm.assert_equal(result, expected) # setitem s2 = s.copy() - idxr(s2)[l] = 0 - result = idxr(s2)[l].values.ravel() + idxr(s2)[idx] = 0 + result = idxr(s2)[idx].values.ravel() assert (result == 0).all() def test_floating_index_doc_example(self): diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index e7831475932d9..08a15300563b8 100644 --- a/pandas/tests/indexing/test_loc.py 
+++ b/pandas/tests/indexing/test_loc.py @@ -814,12 +814,12 @@ def test_loc_non_unique_memory_error(self): columns = list("ABCDEFG") - def gen_test(l, l2): + def gen_test(length, l2): return pd.concat( [ DataFrame( - np.random.randn(l, len(columns)), - index=np.arange(l), + np.random.randn(length, len(columns)), + index=np.arange(length), columns=columns, ), DataFrame( diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index ce9aa16e57f1c..53d38297eafba 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -1681,7 +1681,7 @@ def test_to_string_decimal(self): def test_to_string_line_width(self): df = DataFrame(123, index=range(10, 15), columns=range(30)) s = df.to_string(line_width=80) - assert max(len(l) for l in s.split("\n")) == 80 + assert max(len(line) for line in s.split("\n")) == 80 def test_show_dimensions(self): df = DataFrame(123, index=range(10, 15), columns=range(30)) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 19eb64be1be29..e3c2f20f80ee3 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -2425,8 +2425,8 @@ def test_schema(self): frame = tm.makeTimeDataFrame() create_sql = sql.get_schema(frame, "test") lines = create_sql.splitlines() - for l in lines: - tokens = l.split(" ") + for line in lines: + tokens = line.split(" ") if len(tokens) == 2 and tokens[0] == "A": assert tokens[1] == "DATETIME" @@ -2706,8 +2706,8 @@ def test_schema(self): frame = tm.makeTimeDataFrame() create_sql = sql.get_schema(frame, "test") lines = create_sql.splitlines() - for l in lines: - tokens = l.split(" ") + for line in lines: + tokens = line.split(" ") if len(tokens) == 2 and tokens[0] == "A": assert tokens[1] == "DATETIME" diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 66463a4a2358a..590758bc01fbb 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -776,8 +776,8 @@ def test_mixed_freq_hf_first(self): _, ax = self.plt.subplots() high.plot(ax=ax) low.plot(ax=ax) - for l in ax.get_lines(): - assert PeriodIndex(data=l.get_xdata()).freq == "D" + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "D" @pytest.mark.slow def test_mixed_freq_alignment(self): @@ -803,8 +803,8 @@ def test_mixed_freq_lf_first(self): _, ax = self.plt.subplots() low.plot(legend=True, ax=ax) high.plot(legend=True, ax=ax) - for l in ax.get_lines(): - assert PeriodIndex(data=l.get_xdata()).freq == "D" + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "D" leg = ax.get_legend() assert len(leg.texts) == 2 self.plt.close(ax.get_figure()) @@ -816,8 +816,8 @@ def test_mixed_freq_lf_first(self): _, ax = self.plt.subplots() low.plot(ax=ax) high.plot(ax=ax) - for l in ax.get_lines(): - assert PeriodIndex(data=l.get_xdata()).freq == "T" + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "T" def test_mixed_freq_irreg_period(self): ts = tm.makeTimeSeries() @@ -882,8 +882,8 @@ def test_to_weekly_resampling(self): _, ax = self.plt.subplots() high.plot(ax=ax) low.plot(ax=ax) - for l in ax.get_lines(): - assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq @pytest.mark.slow def test_from_weekly_resampling(self): @@ -900,9 +900,9 @@ def test_from_weekly_resampling(self): [1514, 1519, 1523, 1527, 1531, 1536, 
1540, 1544, 1549, 1553, 1558, 1562], dtype=np.float64, ) - for l in ax.get_lines(): - assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq - xdata = l.get_xdata(orig=False) + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq + xdata = line.get_xdata(orig=False) if len(xdata) == 12: # idxl lines tm.assert_numpy_array_equal(xdata, expected_l) else: @@ -1013,8 +1013,8 @@ def test_mixed_freq_second_millisecond(self): high.plot(ax=ax) low.plot(ax=ax) assert len(ax.get_lines()) == 2 - for l in ax.get_lines(): - assert PeriodIndex(data=l.get_xdata()).freq == "L" + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "L" tm.close() # low to high @@ -1022,8 +1022,8 @@ def test_mixed_freq_second_millisecond(self): low.plot(ax=ax) high.plot(ax=ax) assert len(ax.get_lines()) == 2 - for l in ax.get_lines(): - assert PeriodIndex(data=l.get_xdata()).freq == "L" + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "L" @pytest.mark.slow def test_irreg_dtypes(self): @@ -1154,12 +1154,12 @@ def test_secondary_upsample(self): _, ax = self.plt.subplots() low.plot(ax=ax) ax = high.plot(secondary_y=True, ax=ax) - for l in ax.get_lines(): - assert PeriodIndex(l.get_xdata()).freq == "D" + for line in ax.get_lines(): + assert PeriodIndex(line.get_xdata()).freq == "D" assert hasattr(ax, "left_ax") assert not hasattr(ax, "right_ax") - for l in ax.left_ax.get_lines(): - assert PeriodIndex(l.get_xdata()).freq == "D" + for line in ax.left_ax.get_lines(): + assert PeriodIndex(line.get_xdata()).freq == "D" @pytest.mark.slow def test_secondary_legend(self): @@ -1259,9 +1259,9 @@ def test_format_date_axis(self): _, ax = self.plt.subplots() ax = df.plot(ax=ax) xaxis = ax.get_xaxis() - for l in xaxis.get_ticklabels(): - if len(l.get_text()) > 0: - assert l.get_rotation() == 30 + for line in xaxis.get_ticklabels(): + if len(line.get_text()) > 0: + assert line.get_rotation() == 30 @pytest.mark.slow def test_ax_plot(self): diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 9ccfdfc146eac..143bac3ad136a 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -2192,7 +2192,9 @@ def test_merge_multiindex_columns(): result = frame_x.merge(frame_y, on="id", suffixes=((l_suf, r_suf))) # Constructing the expected results - expected_labels = [l + l_suf for l in letters] + [l + r_suf for l in letters] + expected_labels = [letter + l_suf for letter in letters] + [ + letter + r_suf for letter in letters + ] expected_index = pd.MultiIndex.from_product( [expected_labels, numbers], names=["outer", "inner"] ) diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index d774417e1851c..5a28cd5c418f0 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -2053,8 +2053,8 @@ def test_pivot_table_empty_aggfunc(self): def test_pivot_table_no_column_raises(self): # GH 10326 - def agg(l): - return np.mean(l) + def agg(arr): + return np.mean(arr) foo = DataFrame({"X": [0, 0, 1, 1], "Y": [0, 1, 0, 1], "Z": [10, 20, 30, 40]}) with pytest.raises(KeyError, match="notpresent"): diff --git a/pandas/util/_doctools.py b/pandas/util/_doctools.py index 3a8a1a3144269..256346d482248 100644 --- a/pandas/util/_doctools.py +++ b/pandas/util/_doctools.py @@ -34,11 +34,11 @@ def _get_cells(self, left, right, vertical) -> Tuple[int, int]: """ if vertical: # calculate required number of cells - vcells = 
max(sum(self._shape(l)[0] for l in left), self._shape(right)[0]) - hcells = max(self._shape(l)[1] for l in left) + self._shape(right)[1] + vcells = max(sum(self._shape(df)[0] for df in left), self._shape(right)[0]) + hcells = max(self._shape(df)[1] for df in left) + self._shape(right)[1] else: - vcells = max([self._shape(l)[0] for l in left] + [self._shape(right)[0]]) - hcells = sum([self._shape(l)[1] for l in left] + [self._shape(right)[1]]) + vcells = max([self._shape(df)[0] for df in left] + [self._shape(right)[0]]) + hcells = sum([self._shape(df)[1] for df in left] + [self._shape(right)[1]]) return hcells, vcells def plot(self, left, right, labels=None, vertical: bool = True): @@ -58,7 +58,7 @@ def plot(self, left, right, labels=None, vertical: bool = True): if not isinstance(left, list): left = [left] - left = [self._conv(l) for l in left] + left = [self._conv(df) for df in left] right = self._conv(right) hcells, vcells = self._get_cells(left, right, vertical) @@ -73,8 +73,8 @@ def plot(self, left, right, labels=None, vertical: bool = True): if vertical: gs = gridspec.GridSpec(len(left), hcells) # left - max_left_cols = max(self._shape(l)[1] for l in left) - max_left_rows = max(self._shape(l)[0] for l in left) + max_left_cols = max(self._shape(df)[1] for df in left) + max_left_rows = max(self._shape(df)[0] for df in left) for i, (l, label) in enumerate(zip(left, labels)): ax = fig.add_subplot(gs[i, 0:max_left_cols]) self._make_table(ax, l, title=label, height=1.0 / max_left_rows) @@ -88,10 +88,10 @@ def plot(self, left, right, labels=None, vertical: bool = True): gs = gridspec.GridSpec(1, hcells) # left i = 0 - for l, label in zip(left, labels): - sp = self._shape(l) + for df, label in zip(left, labels): + sp = self._shape(df) ax = fig.add_subplot(gs[0, i : i + sp[1]]) - self._make_table(ax, l, title=label, height=height) + self._make_table(ax, df, title=label, height=height) i += sp[1] # right ax = plt.subplot(gs[0, i:]) diff --git a/setup.cfg b/setup.cfg index 10c7137dc2f86..7b404cb294f58 100644 --- a/setup.cfg +++ b/setup.cfg @@ -22,7 +22,6 @@ ignore = W504, # line break after binary operator E402, # module level import not at top of file E731, # do not assign a lambda expression, use a def - E741, # ambiguous variable name 'l' (GH#34150) C406, # Unnecessary list literal - rewrite as a dict literal. C408, # Unnecessary dict call - rewrite as a literal. C409, # Unnecessary list passed to tuple() - rewrite as a tuple literal.
xref #34150
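For context (an illustration, not part of the diff): E741 flags single-character names that are easily confused with digits, so with the ignore entry removed from `setup.cfg`, flake8 reports code like this:

```python
lines = ["a", "b"]

for l in lines:  # flake8: E741 ambiguous variable name 'l'
    print(l)

for line in lines:  # descriptive name, as used in the rename sweep above
    print(line)
```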
https://api.github.com/repos/pandas-dev/pandas/pulls/38009
2020-11-22T21:59:12Z
2020-11-24T02:49:41Z
2020-11-24T02:49:41Z
2020-12-05T18:49:58Z
REF: define DTA._infer_matches
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index c482eae35b313..8fa2c734092f4 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -101,6 +101,8 @@ class DatetimeLikeArrayMixin(OpsMixin, NDArrayBackedExtensionArray): _generate_range """ + # _infer_matches -> which infer_dtype strings are close enough to our own + _infer_matches: Tuple[str, ...] _is_recognized_dtype: Callable[[DtypeObj], bool] _recognized_scalars: Tuple[Type, ...] _data: np.ndarray diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 7c6b38d9114ab..ce70f929cc79d 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -154,6 +154,7 @@ class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps): _scalar_type = Timestamp _recognized_scalars = (datetime, np.datetime64) _is_recognized_dtype = is_datetime64_any_dtype + _infer_matches = ("datetime", "datetime64", "date") # define my properties & methods for delegation _bool_ops = [ diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 80882acceb56a..50ed526cf01e9 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -124,6 +124,7 @@ class PeriodArray(PeriodMixin, dtl.DatelikeOps): _scalar_type = Period _recognized_scalars = (Period,) _is_recognized_dtype = is_period_dtype + _infer_matches = ("period",) # Names others delegate to us _other_ops: List[str] = [] diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 035e6e84c6ec8..998117cc49d50 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -104,6 +104,7 @@ class TimedeltaArray(dtl.TimelikeOps): _scalar_type = Timedelta _recognized_scalars = (timedelta, np.timedelta64, Tick) _is_recognized_dtype = is_timedelta64_dtype + _infer_matches = ("timedelta", "timedelta64") __array_priority__ = 1000 # define my properties & methods for delegation diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index d0f818410f96a..57f6a8ea0cca5 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -157,16 +157,8 @@ def equals(self, other: object) -> bool: elif other.dtype.kind in ["f", "i", "u", "c"]: return False elif not isinstance(other, type(self)): - inferrable = [ - "timedelta", - "timedelta64", - "datetime", - "datetime64", - "date", - "period", - ] - should_try = False + inferrable = self._data._infer_matches if other.dtype == object: should_try = other.inferred_type in inferrable elif is_categorical_dtype(other.dtype): @@ -648,6 +640,9 @@ def _has_complex_internals(self) -> bool: # used to avoid libreduction code paths, which raise or require conversion return False + def is_type_compatible(self, kind: str) -> bool: + return kind in self._data._infer_matches + # -------------------------------------------------------------------- # Set Operation Methods diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 1dd3eb1017eca..b39a36d95d27b 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -814,9 +814,6 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None): # -------------------------------------------------------------------- - def is_type_compatible(self, kind: str) -> bool: - return kind == self.inferred_type or kind == "datetime" - @property def inferred_type(self) -> str: # b/c datetime is represented as microseconds since the 
epoch, make diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 27e090f450cd8..f44a1701bfa9b 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -229,9 +229,6 @@ def _maybe_cast_slice_bound(self, label, side: str, kind): # ------------------------------------------------------------------- - def is_type_compatible(self, kind: str) -> bool: - return kind == self.inferred_type or kind == "timedelta" - @property def inferred_type(self) -> str: return "timedelta64"
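A rough usage sketch of the refactor above (illustrative; the accepted strings come straight from the `_infer_matches` tuples in the diff):

```python
import pandas as pd

dti = pd.date_range("2020-01-01", periods=3)
tdi = pd.timedelta_range("1 day", periods=3)

# is_type_compatible is now defined once on the datetimelike base class
# and consults the underlying array's _infer_matches tuple
dti.is_type_compatible("datetime")   # True
tdi.is_type_compatible("timedelta")  # True
dti.is_type_compatible("period")     # False
```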
https://api.github.com/repos/pandas-dev/pandas/pulls/38008
2020-11-22T20:36:39Z
2020-11-24T13:38:43Z
2020-11-24T13:38:43Z
2020-11-24T15:27:17Z
ENH: Arrow backed string array - implement factorize() method without casting to objects
diff --git a/asv_bench/benchmarks/algorithms.py b/asv_bench/benchmarks/algorithms.py index 823daa2e31529..aecc609df574e 100644 --- a/asv_bench/benchmarks/algorithms.py +++ b/asv_bench/benchmarks/algorithms.py @@ -28,23 +28,36 @@ class Factorize: "datetime64[ns, tz]", "Int64", "boolean", + "string_arrow", ], ] param_names = ["unique", "sort", "dtype"] def setup(self, unique, sort, dtype): N = 10 ** 5 + string_index = tm.makeStringIndex(N) + try: + from pandas.core.arrays.string_arrow import ArrowStringDtype + + string_arrow = pd.array(string_index, dtype=ArrowStringDtype()) + except ImportError: + string_arrow = None + + if dtype == "string_arrow" and not string_arrow: + raise NotImplementedError + data = { "int": pd.Int64Index(np.arange(N)), "uint": pd.UInt64Index(np.arange(N)), "float": pd.Float64Index(np.random.randn(N)), - "string": tm.makeStringIndex(N), + "string": string_index, "datetime64[ns]": pd.date_range("2011-01-01", freq="H", periods=N), "datetime64[ns, tz]": pd.date_range( "2011-01-01", freq="H", periods=N, tz="Asia/Tokyo" ), "Int64": pd.array(np.arange(N), dtype="Int64"), "boolean": pd.array(np.random.randint(0, 2, N), dtype="boolean"), + "string_arrow": string_arrow, }[dtype] if not unique: data = data.repeat(5) diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index 8441b324515f3..26fe6338118b6 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -6,6 +6,7 @@ Any, Optional, Sequence, + Tuple, Type, Union, ) @@ -20,6 +21,7 @@ Dtype, NpDtype, ) +from pandas.util._decorators import doc from pandas.util._validators import validate_fillna_kwargs from pandas.core.dtypes.base import ExtensionDtype @@ -273,9 +275,22 @@ def __len__(self) -> int: """ return len(self._data) - @classmethod - def _from_factorized(cls, values, original): - return cls._from_sequence(values) + @doc(ExtensionArray.factorize) + def factorize(self, na_sentinel: int = -1) -> Tuple[np.ndarray, ExtensionArray]: + encoded = self._data.dictionary_encode() + indices = pa.chunked_array( + [c.indices for c in encoded.chunks], type=encoded.type.index_type + ).to_pandas() + if indices.dtype.kind == "f": + indices[np.isnan(indices)] = na_sentinel + indices = indices.astype(np.int64, copy=False) + + if encoded.num_chunks: + uniques = type(self)(encoded.chunk(0).dictionary) + else: + uniques = type(self)(pa.array([], type=encoded.type.value_type)) + + return indices.values, uniques @classmethod def _concat_same_type(cls, to_concat) -> ArrowStringArray: diff --git a/pandas/tests/extension/test_string.py b/pandas/tests/extension/test_string.py index d0a3ef17afdbc..49aee76e10f6a 100644 --- a/pandas/tests/extension/test_string.py +++ b/pandas/tests/extension/test_string.py @@ -26,6 +26,29 @@ from pandas.tests.extension import base +def split_array(arr): + if not isinstance(arr.dtype, ArrowStringDtype): + pytest.skip("chunked array n/a") + + def _split_array(arr): + import pyarrow as pa + + arrow_array = arr._data + split = len(arrow_array) // 2 + arrow_array = pa.chunked_array( + [*arrow_array[:split].chunks, *arrow_array[split:].chunks] + ) + assert arrow_array.num_chunks == 2 + return type(arr)(arrow_array) + + return _split_array(arr) + + +@pytest.fixture(params=[True, False]) +def chunked(request): + return request.param + + @pytest.fixture( params=[ StringDtype, @@ -39,28 +62,32 @@ def dtype(request): @pytest.fixture -def data(dtype): +def data(dtype, chunked): strings = np.random.choice(list(string.ascii_letters), size=100) while strings[0] == 
strings[1]: strings = np.random.choice(list(string.ascii_letters), size=100) - return dtype.construct_array_type()._from_sequence(strings) + arr = dtype.construct_array_type()._from_sequence(strings) + return split_array(arr) if chunked else arr @pytest.fixture -def data_missing(dtype): +def data_missing(dtype, chunked): """Length 2 array with [NA, Valid]""" - return dtype.construct_array_type()._from_sequence([pd.NA, "A"]) + arr = dtype.construct_array_type()._from_sequence([pd.NA, "A"]) + return split_array(arr) if chunked else arr @pytest.fixture -def data_for_sorting(dtype): - return dtype.construct_array_type()._from_sequence(["B", "C", "A"]) +def data_for_sorting(dtype, chunked): + arr = dtype.construct_array_type()._from_sequence(["B", "C", "A"]) + return split_array(arr) if chunked else arr @pytest.fixture -def data_missing_for_sorting(dtype): - return dtype.construct_array_type()._from_sequence(["B", pd.NA, "A"]) +def data_missing_for_sorting(dtype, chunked): + arr = dtype.construct_array_type()._from_sequence(["B", pd.NA, "A"]) + return split_array(arr) if chunked else arr @pytest.fixture @@ -69,10 +96,11 @@ def na_value(): @pytest.fixture -def data_for_grouping(dtype): - return dtype.construct_array_type()._from_sequence( +def data_for_grouping(dtype, chunked): + arr = dtype.construct_array_type()._from_sequence( ["B", "B", pd.NA, pd.NA, "A", "A", "B", "C"] ) + return split_array(arr) if chunked else arr class TestDtype(base.BaseDtypeTests):
xref https://github.com/pandas-dev/pandas/issues/35169#issuecomment-727217462, follow-on to #35259

This is more or less a copy/paste from https://github.com/xhochy/fletcher, with a slight tidy, for initial review.

Feedback still to do:
- [x] benchmarking
- [x] return type for chunked array with more than 1 chunk
- [x] maybe update tests. We don't have failing tests, but the return type should be `Tuple[np.ndarray, ExtensionArray]`, while we have `return factorize(np_array, na_sentinel=na_sentinel)` if there is more than 1 chunk, and the return type of `pd.factorize` is `Tuple[np.ndarray, Union[np.ndarray, ABCIndex]]` (mypy doesn't report this as an error since `np.ndarray` resolves to `Any`). Since we don't have failing tests, we should probably parametrize the data for the base extension tests with an array with one chunk and an array with multiple chunks.

cc @jorisvandenbossche @xhochy
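A minimal sketch of the dictionary-encoding approach used by `factorize` above, applied directly to a pyarrow `ChunkedArray` (requires pyarrow; the sample data is illustrative):

```python
import pyarrow as pa

arr = pa.chunked_array([["a", "b"], ["a", None, "c"]])
encoded = arr.dictionary_encode()

# integer codes per chunk; nulls come back as NaN from to_pandas(), which
# the implementation replaces with na_sentinel before casting to int64
indices = pa.chunked_array(
    [chunk.indices for chunk in encoded.chunks],
    type=encoded.type.index_type,
).to_pandas()

# mirroring the PR: the uniques are taken from the first chunk's dictionary
uniques = encoded.chunk(0).dictionary
```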
https://api.github.com/repos/pandas-dev/pandas/pulls/38007
2020-11-22T20:06:58Z
2021-03-02T16:51:43Z
2021-03-02T16:51:43Z
2021-03-02T17:09:39Z
TST/REF: collect tests from test_multilevel
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py index ef43319d11464..a7b3333e7c690 100644 --- a/pandas/tests/frame/test_repr_info.py +++ b/pandas/tests/frame/test_repr_info.py @@ -23,6 +23,14 @@ class TestDataFrameReprInfoEtc: + def test_repr_unicode_level_names(self, frame_or_series): + index = MultiIndex.from_tuples([(0, 0), (1, 1)], names=["\u0394", "i1"]) + + obj = DataFrame(np.random.randn(2, 4), index=index) + if frame_or_series is Series: + obj = obj[0] + repr(obj) + def test_assign_index_sequences(self): # GH#2200 df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}).set_index( diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 28846bcf2f14d..65f192b5d4d00 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -1222,6 +1222,40 @@ def test_loc_getitem_slice_datetime_objs_with_datetimeindex(self): result = ser.loc[datetime(1900, 1, 1) : datetime(2100, 1, 1)] tm.assert_series_equal(result, ser) + def test_loc_getitem_sorted_index_level_with_duplicates(self): + # GH#4516 sorting a MultiIndex with duplicates and multiple dtypes + mi = MultiIndex.from_tuples( + [ + ("foo", "bar"), + ("foo", "bar"), + ("bah", "bam"), + ("bah", "bam"), + ("foo", "bar"), + ("bah", "bam"), + ], + names=["A", "B"], + ) + df = DataFrame( + [ + [1.0, 1], + [2.0, 2], + [3.0, 3], + [4.0, 4], + [5.0, 5], + [6.0, 6], + ], + index=mi, + columns=["C", "D"], + ) + df = df.sort_index(level=0) + + expected = DataFrame( + [[1.0, 1], [2.0, 2], [5.0, 5]], columns=["C", "D"], index=mi.take([0, 1, 4]) + ) + + result = df.loc[("foo", "bar")] + tm.assert_frame_equal(result, expected) + class TestLocSetitemWithExpansion: @pytest.mark.slow diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 189c792ac228b..84aa8ec6f970f 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -135,7 +135,9 @@ def test_groupby_level_no_obs(self): result = grouped.sum() assert (result.columns == ["f2", "f3"]).all() - def test_insert_index(self, multiindex_year_month_day_dataframe_random_data): + def test_setitem_with_expansion_multiindex_columns( + self, multiindex_year_month_day_dataframe_random_data + ): ymd = multiindex_year_month_day_dataframe_random_data df = ymd[:5].T @@ -242,12 +244,11 @@ def test_std_var_pass_ddof(self): expected = df.groupby(level=0).agg(alt) tm.assert_frame_equal(result, expected) - @pytest.mark.parametrize("klass", [Series, DataFrame]) def test_agg_multiple_levels( - self, multiindex_year_month_day_dataframe_random_data, klass + self, multiindex_year_month_day_dataframe_random_data, frame_or_series ): ymd = multiindex_year_month_day_dataframe_random_data - if klass is Series: + if frame_or_series is Series: ymd = ymd["A"] result = ymd.sum(level=["year", "month"]) @@ -349,14 +350,6 @@ def test_reindex_level_partial_selection(self, multiindex_dataframe_random_data) result = frame.T.loc[:, ["foo", "qux"]] tm.assert_frame_equal(result, expected.T) - def test_unicode_repr_level_names(self): - index = MultiIndex.from_tuples([(0, 0), (1, 1)], names=["\u0394", "i1"]) - - s = Series(range(2), index=index) - df = DataFrame(np.random.randn(2, 4), index=index) - repr(s) - repr(df) - @pytest.mark.parametrize("d", [4, "d"]) def test_empty_frame_groupby_dtypes_consistency(self, d): # GH 20888 @@ -386,28 +379,6 @@ def test_duplicate_groupby_issues(self): result = s.groupby(s.index).first() assert len(result) == 3 - def test_duplicate_mi(self): - # GH 
4516 - df = DataFrame( - [ - ["foo", "bar", 1.0, 1], - ["foo", "bar", 2.0, 2], - ["bah", "bam", 3.0, 3], - ["bah", "bam", 4.0, 4], - ["foo", "bar", 5.0, 5], - ["bah", "bam", 6.0, 6], - ], - columns=list("ABCD"), - ) - df = df.set_index(["A", "B"]) - df = df.sort_index(level=0) - expected = DataFrame( - [["foo", "bar", 1.0, 1], ["foo", "bar", 2.0, 2], ["foo", "bar", 5.0, 5]], - columns=list("ABCD"), - ).set_index(["A", "B"]) - result = df.loc[("foo", "bar")] - tm.assert_frame_equal(result, expected) - def test_subsets_multiindex_dtype(self): # GH 20757 data = [["x", 1]]
https://api.github.com/repos/pandas-dev/pandas/pulls/38006
2020-11-22T19:36:02Z
2020-11-23T13:28:00Z
2020-11-23T13:28:00Z
2020-11-23T15:15:35Z
TST/REF: collect indexing tests by method
diff --git a/pandas/tests/frame/indexing/test_getitem.py b/pandas/tests/frame/indexing/test_getitem.py index 2e65770d7afad..868df82a43a91 100644 --- a/pandas/tests/frame/indexing/test_getitem.py +++ b/pandas/tests/frame/indexing/test_getitem.py @@ -68,6 +68,18 @@ def test_getitem_sparse_column_return_type_and_dtype(self): tm.assert_series_equal(result, expected) +class TestGetitemListLike: + def test_getitem_list_missing_key(self): + # GH#13822, incorrect error string with non-unique columns when missing + # column is accessed + df = DataFrame({"x": [1.0], "y": [2.0], "z": [3.0]}) + df.columns = ["x", "x", "z"] + + # Check that we get the correct value in the KeyError + with pytest.raises(KeyError, match=r"\['y'\] not in index"): + df[["x", "y", "z"]] + + class TestGetitemCallable: def test_getitem_callable(self, float_frame): # GH#12533 diff --git a/pandas/tests/indexing/test_at.py b/pandas/tests/indexing/test_at.py index d410a4137554b..c721ba2e6daad 100644 --- a/pandas/tests/indexing/test_at.py +++ b/pandas/tests/indexing/test_at.py @@ -3,7 +3,7 @@ import numpy as np import pytest -from pandas import DataFrame, Series +from pandas import DataFrame, Series, Timestamp import pandas._testing as tm @@ -27,6 +27,16 @@ def test_at_setitem_mixed_index_assignment(self): assert ser.iat[3] == 22 +class TestAtSetItemWithExpansion: + def test_at_setitem_expansion_series_dt64tz_value(self, tz_naive_fixture): + # GH#25506 + ts = Timestamp("2017-08-05 00:00:00+0100", tz=tz_naive_fixture) + result = Series(ts) + result.at[1] = ts + expected = Series([ts, ts]) + tm.assert_series_equal(result, expected) + + class TestAtWithDuplicates: def test_at_with_duplicate_axes_requires_scalar_lookup(self): # GH#33041 check that falling back to loc doesn't allow non-scalar diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py index 94fc3960f24c5..6fff706e27cd2 100644 --- a/pandas/tests/indexing/test_categorical.py +++ b/pandas/tests/indexing/test_categorical.py @@ -430,25 +430,6 @@ def test_ix_categorical_index(self): ) tm.assert_frame_equal(cdf.loc[:, ["X", "Y"]], expect) - def test_read_only_source(self): - # GH 10043 - rw_array = np.eye(10) - rw_df = DataFrame(rw_array) - - ro_array = np.eye(10) - ro_array.setflags(write=False) - ro_df = DataFrame(ro_array) - - tm.assert_frame_equal(rw_df.iloc[[1, 2, 3]], ro_df.iloc[[1, 2, 3]]) - tm.assert_frame_equal(rw_df.iloc[[1]], ro_df.iloc[[1]]) - tm.assert_series_equal(rw_df.iloc[1], ro_df.iloc[1]) - tm.assert_frame_equal(rw_df.iloc[1:3], ro_df.iloc[1:3]) - - tm.assert_frame_equal(rw_df.loc[[1, 2, 3]], ro_df.loc[[1, 2, 3]]) - tm.assert_frame_equal(rw_df.loc[[1]], ro_df.loc[[1]]) - tm.assert_series_equal(rw_df.loc[1], ro_df.loc[1]) - tm.assert_frame_equal(rw_df.loc[1:3], ro_df.loc[1:3]) - def test_loc_slice(self): # GH9748 with pytest.raises(KeyError, match="1"): diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py index e7bf186ae6456..d00fe58265a2e 100644 --- a/pandas/tests/indexing/test_datetime.py +++ b/pandas/tests/indexing/test_datetime.py @@ -160,15 +160,22 @@ def test_indexing_with_datetimeindex_tz(self): expected = Series([0, 5], index=index) tm.assert_series_equal(result, expected) - def test_series_partial_set_datetime(self): + @pytest.mark.parametrize("to_period", [True, False]) + def test_loc_getitem_listlike_of_datetimelike_keys(self, to_period): # GH 11497 idx = date_range("2011-01-01", "2011-01-02", freq="D", name="idx") + if to_period: + idx = idx.to_period("D") ser = 
Series([0.1, 0.2], index=idx, name="s") - result = ser.loc[[Timestamp("2011-01-01"), Timestamp("2011-01-02")]] + keys = [Timestamp("2011-01-01"), Timestamp("2011-01-02")] + if to_period: + keys = [x.to_period("D") for x in keys] + result = ser.loc[keys] exp = Series([0.1, 0.2], index=idx, name="s") - exp.index = exp.index._with_freq(None) + if not to_period: + exp.index = exp.index._with_freq(None) tm.assert_series_equal(result, exp, check_index_type=True) keys = [ @@ -176,8 +183,10 @@ def test_series_partial_set_datetime(self): Timestamp("2011-01-02"), Timestamp("2011-01-01"), ] + if to_period: + keys = [x.to_period("D") for x in keys] exp = Series( - [0.2, 0.2, 0.1], index=pd.DatetimeIndex(keys, name="idx"), name="s" + [0.2, 0.2, 0.1], index=Index(keys, name="idx", dtype=idx.dtype), name="s" ) result = ser.loc[keys] tm.assert_series_equal(result, exp, check_index_type=True) @@ -187,35 +196,9 @@ def test_series_partial_set_datetime(self): Timestamp("2011-01-02"), Timestamp("2011-01-03"), ] - with pytest.raises(KeyError, match="with any missing labels"): - ser.loc[keys] - - def test_series_partial_set_period(self): - # GH 11497 - - idx = pd.period_range("2011-01-01", "2011-01-02", freq="D", name="idx") - ser = Series([0.1, 0.2], index=idx, name="s") - - result = ser.loc[ - [pd.Period("2011-01-01", freq="D"), pd.Period("2011-01-02", freq="D")] - ] - exp = Series([0.1, 0.2], index=idx, name="s") - tm.assert_series_equal(result, exp, check_index_type=True) + if to_period: + keys = [x.to_period("D") for x in keys] - keys = [ - pd.Period("2011-01-02", freq="D"), - pd.Period("2011-01-02", freq="D"), - pd.Period("2011-01-01", freq="D"), - ] - exp = Series([0.2, 0.2, 0.1], index=pd.PeriodIndex(keys, name="idx"), name="s") - result = ser.loc[keys] - tm.assert_series_equal(result, exp, check_index_type=True) - - keys = [ - pd.Period("2011-01-03", freq="D"), - pd.Period("2011-01-02", freq="D"), - pd.Period("2011-01-03", freq="D"), - ] with pytest.raises(KeyError, match="with any missing labels"): ser.loc[keys] diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py index 1b78ba6defd69..9f86e78fc36c4 100644 --- a/pandas/tests/indexing/test_floats.py +++ b/pandas/tests/indexing/test_floats.py @@ -140,8 +140,11 @@ def test_scalar_with_mixed(self): expected = 3 assert result == expected + @pytest.mark.parametrize( + "idxr,getitem", [(lambda x: x.loc, False), (lambda x: x, True)] + ) @pytest.mark.parametrize("index_func", [tm.makeIntIndex, tm.makeRangeIndex]) - def test_scalar_integer(self, index_func, frame_or_series): + def test_scalar_integer(self, index_func, frame_or_series, idxr, getitem): # test how scalar float indexers work on int indexes @@ -150,37 +153,39 @@ def test_scalar_integer(self, index_func, frame_or_series): obj = gen_obj(frame_or_series, i) # coerce to equal int - for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]: - result = idxr(obj)[3.0] - self.check(result, obj, 3, getitem) + result = idxr(obj)[3.0] + self.check(result, obj, 3, getitem) - # coerce to equal int - for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]: - - if isinstance(obj, Series): + if isinstance(obj, Series): - def compare(x, y): - assert x == y + def compare(x, y): + assert x == y - expected = 100 + expected = 100 + else: + compare = tm.assert_series_equal + if getitem: + expected = Series(100, index=range(len(obj)), name=3) else: - compare = tm.assert_series_equal - if getitem: - expected = Series(100, index=range(len(obj)), name=3) - else: - 
expected = Series(100.0, index=range(len(obj)), name=3) + expected = Series(100.0, index=range(len(obj)), name=3) - s2 = obj.copy() - idxr(s2)[3.0] = 100 + s2 = obj.copy() + idxr(s2)[3.0] = 100 - result = idxr(s2)[3.0] - compare(result, expected) + result = idxr(s2)[3.0] + compare(result, expected) - result = idxr(s2)[3] - compare(result, expected) + result = idxr(s2)[3] + compare(result, expected) + @pytest.mark.parametrize("index_func", [tm.makeIntIndex, tm.makeRangeIndex]) + def test_scalar_integer_contains_float(self, index_func, frame_or_series): # contains + # integer index + index = index_func(5) + obj = gen_obj(frame_or_series, index) + # coerce to equal int assert 3.0 in obj diff --git a/pandas/tests/indexing/test_iat.py b/pandas/tests/indexing/test_iat.py index b1025b99e9bd5..84bd1d63f6bbc 100644 --- a/pandas/tests/indexing/test_iat.py +++ b/pandas/tests/indexing/test_iat.py @@ -1,4 +1,6 @@ -import pandas as pd +import numpy as np + +from pandas import DataFrame, Series, period_range def test_iat(float_frame): @@ -12,5 +14,14 @@ def test_iat(float_frame): def test_iat_duplicate_columns(): # https://github.com/pandas-dev/pandas/issues/11754 - df = pd.DataFrame([[1, 2]], columns=["x", "x"]) + df = DataFrame([[1, 2]], columns=["x", "x"]) assert df.iat[0, 0] == 1 + + +def test_iat_getitem_series_with_period_index(): + # GH#4390, iat incorrectly indexing + index = period_range("1/1/2001", periods=10) + ser = Series(np.random.randn(10), index=index) + expected = ser[index[0]] + result = ser.iat[0] + assert expected == result diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index bc40079e3169b..9ae9566ac87ef 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -801,6 +801,36 @@ def test_iloc_setitem_empty_frame_raises_with_3d_ndarray(self): with pytest.raises(ValueError, match=msg): obj.iloc[nd3] = 0 + @pytest.mark.parametrize("indexer", [lambda x: x.loc, lambda x: x.iloc]) + def test_iloc_getitem_read_only_values(self, indexer): + # GH#10043 this is fundamentally a test for iloc, but test loc while + # we're here + rw_array = np.eye(10) + rw_df = DataFrame(rw_array) + + ro_array = np.eye(10) + ro_array.setflags(write=False) + ro_df = DataFrame(ro_array) + + tm.assert_frame_equal(indexer(rw_df)[[1, 2, 3]], indexer(ro_df)[[1, 2, 3]]) + tm.assert_frame_equal(indexer(rw_df)[[1]], indexer(ro_df)[[1]]) + tm.assert_series_equal(indexer(rw_df)[1], indexer(ro_df)[1]) + tm.assert_frame_equal(indexer(rw_df)[1:3], indexer(ro_df)[1:3]) + + def test_iloc_getitem_readonly_key(self): + # GH#17192 iloc with read-only array raising TypeError + df = DataFrame({"data": np.ones(100, dtype="float64")}) + indices = np.array([1, 3, 6]) + indices.flags.writeable = False + + result = df.iloc[indices] + expected = df.loc[[1, 3, 6]] + tm.assert_frame_equal(result, expected) + + result = df["data"].iloc[indices] + expected = df["data"].loc[[1, 3, 6]] + tm.assert_series_equal(result, expected) + def test_iloc_assign_series_to_df_cell(self): # GH 37593 df = DataFrame(columns=["a"], index=[0]) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 87ee23dc78f89..b52c2ebbbc584 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -17,6 +17,23 @@ from .test_floats import gen_obj + +def getitem(x): + return x + + +def setitem(x): + return x + + +def loc(x): + return x.loc + + +def iloc(x): + return x.iloc + + # 
------------------------------------------------------------------------ # Indexing test cases @@ -55,15 +72,8 @@ def test_setitem_ndarray_1d(self): with pytest.raises(ValueError, match=msg): df[2:5] = np.arange(1, 4) * 1j - @pytest.mark.parametrize( - "idxr, idxr_id", - [ - (lambda x: x, "getitem"), - (lambda x: x.loc, "loc"), - (lambda x: x.iloc, "iloc"), - ], - ) - def test_getitem_ndarray_3d(self, index, frame_or_series, idxr, idxr_id): + @pytest.mark.parametrize("idxr", [getitem, loc, iloc]) + def test_getitem_ndarray_3d(self, index, frame_or_series, idxr): # GH 25567 obj = gen_obj(frame_or_series, index) idxr = idxr(obj) @@ -85,26 +95,19 @@ def test_getitem_ndarray_3d(self, index, frame_or_series, idxr, idxr_id): with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False): idxr[nd3] - @pytest.mark.parametrize( - "idxr, idxr_id", - [ - (lambda x: x, "setitem"), - (lambda x: x.loc, "loc"), - (lambda x: x.iloc, "iloc"), - ], - ) - def test_setitem_ndarray_3d(self, index, frame_or_series, idxr, idxr_id): + @pytest.mark.parametrize("indexer", [setitem, loc, iloc]) + def test_setitem_ndarray_3d(self, index, frame_or_series, indexer): # GH 25567 obj = gen_obj(frame_or_series, index) - idxr = idxr(obj) + idxr = indexer(obj) nd3 = np.random.randint(5, size=(2, 2, 2)) - if idxr_id == "iloc": + if indexer.__name__ == "iloc": err = ValueError msg = f"Cannot set values with ndim > {obj.ndim}" elif ( isinstance(index, pd.IntervalIndex) - and idxr_id == "setitem" + and indexer.__name__ == "setitem" and obj.ndim == 1 ): err = AttributeError @@ -294,7 +297,7 @@ def test_dups_fancy_indexing2(self): result = df.loc[[1, 2], ["a", "b"]] tm.assert_frame_equal(result, expected) - @pytest.mark.parametrize("case", [lambda s: s, lambda s: s.loc]) + @pytest.mark.parametrize("case", [getitem, loc]) def test_duplicate_int_indexing(self, case): # GH 17347 s = Series(range(3), index=[1, 1, 3]) @@ -591,7 +594,7 @@ def test_astype_assignment(self): expected = DataFrame({"A": [1, 2, 3, 4]}) tm.assert_frame_equal(df, expected) - @pytest.mark.parametrize("indexer", [lambda x: x.loc, lambda x: x]) + @pytest.mark.parametrize("indexer", [getitem, loc]) def test_index_type_coercion(self, indexer): # GH 11836 @@ -998,43 +1001,6 @@ def test_extension_array_cross_section_converts(): tm.assert_series_equal(result, expected) -def test_readonly_indices(): - # GH#17192 iloc with read-only array raising TypeError - df = DataFrame({"data": np.ones(100, dtype="float64")}) - indices = np.array([1, 3, 6]) - indices.flags.writeable = False - - result = df.iloc[indices] - expected = df.loc[[1, 3, 6]] - tm.assert_frame_equal(result, expected) - - result = df["data"].iloc[indices] - expected = df["data"].loc[[1, 3, 6]] - tm.assert_series_equal(result, expected) - - -def test_1tuple_without_multiindex(): - ser = Series(range(5)) - key = (slice(3),) - - result = ser[key] - expected = ser[key[0]] - tm.assert_series_equal(result, expected) - - -def test_duplicate_index_mistyped_key_raises_keyerror(): - # GH#29189 float_index.get_loc(None) should raise KeyError, not TypeError - ser = Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0]) - with pytest.raises(KeyError, match="None"): - ser[None] - - with pytest.raises(KeyError, match="None"): - ser.index.get_loc(None) - - with pytest.raises(KeyError, match="None"): - ser.index._engine.get_loc(None) - - def test_setitem_with_bool_mask_and_values_matching_n_trues_in_length(): # GH 30567 ser = Series([None] * 10) diff --git a/pandas/tests/indexing/test_loc.py 
b/pandas/tests/indexing/test_loc.py index 28846bcf2f14d..07b7c5c6767c3 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -1573,6 +1573,14 @@ def test_loc_getitem_slice_unordered_dt_index(self, frame_or_series, start): with tm.assert_produces_warning(FutureWarning): obj.loc[start:"2022"] + @pytest.mark.parametrize("value", [1, 1.5]) + def test_loc_getitem_slice_labels_int_in_object_index(self, frame_or_series, value): + # GH: 26491 + obj = frame_or_series(range(4), index=[value, "first", 2, "third"]) + result = obj.loc[value:"third"] + expected = frame_or_series(range(4), index=[value, "first", 2, "third"]) + tm.assert_equal(result, expected) + class TestLocBooleanMask: def test_loc_setitem_bool_mask_timedeltaindex(self): @@ -1999,12 +2007,3 @@ def test_loc_setitem_dt64tz_values(self): s2["a"] = expected result = s2["a"] assert result == expected - - -@pytest.mark.parametrize("value", [1, 1.5]) -def test_loc_int_in_object_index(frame_or_series, value): - # GH: 26491 - obj = frame_or_series(range(4), index=[value, "first", 2, "third"]) - result = obj.loc[value:"third"] - expected = frame_or_series(range(4), index=[value, "first", 2, "third"]) - tm.assert_equal(result, expected) diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py index 230725d8ee11d..dd01f4e6a4f49 100644 --- a/pandas/tests/indexing/test_scalar.py +++ b/pandas/tests/indexing/test_scalar.py @@ -4,7 +4,7 @@ import numpy as np import pytest -from pandas import DataFrame, Series, Timedelta, Timestamp, date_range, period_range +from pandas import DataFrame, Series, Timedelta, Timestamp, date_range import pandas._testing as tm from pandas.tests.indexing.common import Base @@ -146,18 +146,7 @@ def test_frame_at_with_duplicate_axes(self): expected = Series([2.0, 2.0], index=["A", "A"], name=1) tm.assert_series_equal(df.iloc[1], expected) - # TODO: belongs somewhere else? 
- def test_getitem_list_missing_key(self): - # GH 13822, incorrect error string with non-unique columns when missing - # column is accessed - df = DataFrame({"x": [1.0], "y": [2.0], "z": [3.0]}) - df.columns = ["x", "x", "z"] - - # Check that we get the correct value in the KeyError - with pytest.raises(KeyError, match=r"\['y'\] not in index"): - df[["x", "y", "z"]] - - def test_at_with_tz(self): + def test_at_getitem_dt64tz_values(self): # gh-15822 df = DataFrame( { @@ -178,14 +167,6 @@ def test_at_with_tz(self): result = df.at[0, "date"] assert result == expected - def test_at_setitem_expansion_series_dt64tz_value(self, tz_naive_fixture): - # GH 25506 - ts = Timestamp("2017-08-05 00:00:00+0100", tz=tz_naive_fixture) - result = Series(ts) - result.at[1] = ts - expected = Series([ts, ts]) - tm.assert_series_equal(result, expected) - def test_mixed_index_at_iat_loc_iloc_series(self): # GH 19860 s = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2]) @@ -259,15 +240,6 @@ def test_iat_dont_wrap_object_datetimelike(): assert not isinstance(result, Timedelta) -def test_iat_series_with_period_index(): - # GH 4390, iat incorrectly indexing - index = period_range("1/1/2001", periods=10) - ser = Series(np.random.randn(10), index=index) - expected = ser[index[0]] - result = ser.iat[0] - assert expected == result - - def test_at_with_tuple_index_get(): # GH 26989 # DataFrame.at getter works with Index of tuples diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py index 7b794668803c3..3686337141420 100644 --- a/pandas/tests/series/indexing/test_getitem.py +++ b/pandas/tests/series/indexing/test_getitem.py @@ -427,3 +427,25 @@ def test_getitem_assignment_series_aligment(): ser[idx] = Series([10, 11, 12]) expected = Series([0, 1, 10, 3, 11, 5, 6, 7, 8, 12]) tm.assert_series_equal(ser, expected) + + +def test_getitem_duplicate_index_mistyped_key_raises_keyerror(): + # GH#29189 float_index.get_loc(None) should raise KeyError, not TypeError + ser = Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0]) + with pytest.raises(KeyError, match="None"): + ser[None] + + with pytest.raises(KeyError, match="None"): + ser.index.get_loc(None) + + with pytest.raises(KeyError, match="None"): + ser.index._engine.get_loc(None) + + +def test_getitem_1tuple_slice_without_multiindex(): + ser = Series(range(5)) + key = (slice(3),) + + result = ser[key] + expected = ser[key[0]] + tm.assert_series_equal(result, expected)
https://api.github.com/repos/pandas-dev/pandas/pulls/38005
2020-11-22T19:29:47Z
2020-11-23T13:25:04Z
2020-11-23T13:25:04Z
2020-11-23T15:14:26Z
REF: ensure_arraylike in algos.isin
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index be091314e6c25..a3abfaa48500c 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -218,7 +218,8 @@ def _ensure_arraylike(values): """ if not is_array_like(values): inferred = lib.infer_dtype(values, skipna=False) - if inferred in ["mixed", "string"]: + if inferred in ["mixed", "string", "mixed-integer"]: + # "mixed-integer" to ensure we do not cast ["ss", 42] to str GH#22160 if isinstance(values, tuple): values = list(values) values = construct_1d_object_array_from_listlike(values) @@ -424,6 +425,7 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray: values = construct_1d_object_array_from_listlike(list(values)) # TODO: could use ensure_arraylike here + comps = _ensure_arraylike(comps) comps = extract_array(comps, extract_numpy=True) if is_categorical_dtype(comps): # TODO(extension)
This is one of two places where we do not explicitly call `_ensure_arraylike` before `_ensure_data`. Once I get the other one sorted out, we can annotate `_ensure_data` and others.
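A minimal sketch (not from this PR's diff) of why the `"mixed-integer"` inference case matters: NumPy's default inference stringifies a mixed list such as `["ss", 42]`, so the integer element no longer compares equal to `42` — the GH#22160 scenario the new comment references. Building an object array instead, as `construct_1d_object_array_from_listlike` does, keeps the original Python objects:

```python
import numpy as np

# NumPy's default inference casts the mixed list to a string dtype,
# so the integer 42 silently becomes the string "42".
arr = np.asarray(["ss", 42])
print(arr.dtype.kind)  # 'U' (unicode strings)
print(arr[1] == 42)    # False

# An object array preserves the original Python objects, which is what
# construct_1d_object_array_from_listlike accomplishes, so a membership
# check against the integer still succeeds.
obj = np.asarray(["ss", 42], dtype=object)
print(obj[1] == 42)    # True
```

With `comps` now routed through `_ensure_arraylike` (per the diff above), list-like inputs to `isin` should get the same object-array treatment before `_ensure_data` runs; the exact call path still depends on the input types.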
https://api.github.com/repos/pandas-dev/pandas/pulls/38004
2020-11-22T18:48:28Z
2020-11-23T13:25:35Z
2020-11-23T13:25:34Z
2020-11-23T15:17:43Z
DOC: Whatsnew 1.2.0 cleanup
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index d860be54c56db..13f0d222fc4ff 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -22,7 +22,7 @@ Optionally disallow duplicate labels control whether the index or columns can contain duplicate labels (:issue:`28394`). This can be used to prevent accidental introduction of duplicate labels, which can affect downstream operations. -By default, duplicates continue to be allowed +By default, duplicates continue to be allowed. .. ipython:: python @@ -84,7 +84,7 @@ Support for binary file handles in ``to_csv`` :meth:`to_csv` supports file handles in binary mode (:issue:`19827` and :issue:`35058`) with ``encoding`` (:issue:`13068` and :issue:`23854`) and ``compression`` (:issue:`22555`). -If Pandas does not automatically detect whether the file handle is opened in binary or text mode, +If pandas does not automatically detect whether the file handle is opened in binary or text mode, it is necessary to provide ``mode="wb"``. For example: @@ -104,7 +104,7 @@ Support for short caption and table position in ``to_latex`` a floating table position (:issue:`35281`) and a short caption (:issue:`36267`). -New keyword ``position`` is implemented to set the position. +The keyword ``position`` has been added to set the position. .. ipython:: python @@ -112,9 +112,9 @@ New keyword ``position`` is implemented to set the position. table = data.to_latex(position='ht') print(table) -Usage of keyword ``caption`` is extended. +Usage of the keyword ``caption`` has been extended. Besides taking a single string as an argument, -one can optionally provide a tuple of ``(full_caption, short_caption)`` +one can optionally provide a tuple ``(full_caption, short_caption)`` to add a short caption macro. .. ipython:: python @@ -141,12 +141,12 @@ parser by default should have no impact on performance. (:issue:`17154`) Experimental nullable data types for float data ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -We've added :class:`Float32Dtype` / :class:`Float64Dtype` and :class:`~arrays.FloatingArray`, -an extension data type dedicated to floating point data that can hold the +We've added :class:`Float32Dtype` / :class:`Float64Dtype` and :class:`~arrays.FloatingArray`. +These are extension data types dedicated to floating point data that can hold the ``pd.NA`` missing value indicator (:issue:`32265`, :issue:`34307`). While the default float data type already supports missing values using ``np.nan``, -this new data type uses ``pd.NA`` (and its corresponding behaviour) as missing +these new data types use ``pd.NA`` (and its corresponding behaviour) as the missing value indicator, in line with the already existing nullable :ref:`integer <integer_na>` and :ref:`boolean <boolean>` data types. @@ -180,7 +180,7 @@ Alternatively, you can also use the dtype object: .. warning:: - Experimental: the new floating data types are currently experimental, and its + Experimental: the new floating data types are currently experimental, and their behaviour or API may still change without warning. Especially the behaviour regarding NaN (distinct from NA missing values) is subject to change. @@ -189,8 +189,8 @@ Alternatively, you can also use the dtype object: Index/column name preservation when aggregating ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -When aggregating using :meth:`concat` or the :class:`DataFrame` constructor, Pandas -will attempt to preserve index (and column) names whenever possible (:issue:`35847`). 
+When aggregating using :meth:`concat` or the :class:`DataFrame` constructor, pandas +will now attempt to preserve index and column names whenever possible (:issue:`35847`). In the case where all inputs share a common name, this name will be assigned to the result. When the input names do not all agree, the result will be unnamed. Here is an example where the index name is preserved: @@ -209,7 +209,7 @@ level-by-level basis. Groupby supports EWM operations directly ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -:class:`DataFrameGroupBy` now supports exponentially weighted window operations directly (:issue:`16037`). +:class:`.DataFrameGroupBy` now supports exponentially weighted window operations directly (:issue:`16037`). .. ipython:: python @@ -225,37 +225,36 @@ to use this feature. Other enhancements ^^^^^^^^^^^^^^^^^^ -- Added ``day_of_week``(compatibility alias ``dayofweek``) property to ``Timestamp``, ``DatetimeIndex``, ``Period``, ``PeriodIndex`` (:issue:`9605`) -- Added ``day_of_year`` (compatibility alias ``dayofyear``) property to ``Timestamp``, ``DatetimeIndex``, ``Period``, ``PeriodIndex`` (:issue:`9605`) -- Added :meth:`~DataFrame.set_flags` for setting table-wide flags on a ``Series`` or ``DataFrame`` (:issue:`28394`) +- Added ``day_of_week`` (compatibility alias ``dayofweek``) property to :class:`Timestamp`, :class:`.DatetimeIndex`, :class:`Period`, :class:`PeriodIndex` (:issue:`9605`) +- Added ``day_of_year`` (compatibility alias ``dayofyear``) property to :class:`Timestamp`, :class:`.DatetimeIndex`, :class:`Period`, :class:`PeriodIndex` (:issue:`9605`) +- Added :meth:`~DataFrame.set_flags` for setting table-wide flags on a Series or DataFrame (:issue:`28394`) - :meth:`DataFrame.applymap` now supports ``na_action`` (:issue:`23803`) - :class:`Index` with object dtype supports division and multiplication (:issue:`34160`) - :meth:`DataFrame.explode` and :meth:`Series.explode` now support exploding of sets (:issue:`35614`) - :meth:`DataFrame.hist` now supports time series (datetime) data (:issue:`32590`) -- :meth:`Styler.set_table_styles` now allows the direct styling of rows and columns and can be chained (:issue:`35607`) -- ``Styler`` now allows direct CSS class name addition to individual data cells (:issue:`36159`) -- :meth:`Rolling.mean()` and :meth:`Rolling.sum()` use Kahan summation to calculate the mean to avoid numerical problems (:issue:`10319`, :issue:`11645`, :issue:`13254`, :issue:`32761`, :issue:`36031`) -- :meth:`DatetimeIndex.searchsorted`, :meth:`TimedeltaIndex.searchsorted`, :meth:`PeriodIndex.searchsorted`, and :meth:`Series.searchsorted` with datetimelike dtypes will now try to cast string arguments (listlike and scalar) to the matching datetimelike type (:issue:`36346`) -- +- :meth:`.Styler.set_table_styles` now allows the direct styling of rows and columns and can be chained (:issue:`35607`) +- :class:`.Styler` now allows direct CSS class name addition to individual data cells (:issue:`36159`) +- :meth:`.Rolling.mean` and :meth:`.Rolling.sum` use Kahan summation to calculate the mean to avoid numerical problems (:issue:`10319`, :issue:`11645`, :issue:`13254`, :issue:`32761`, :issue:`36031`) +- :meth:`.DatetimeIndex.searchsorted`, :meth:`.TimedeltaIndex.searchsorted`, :meth:`PeriodIndex.searchsorted`, and :meth:`Series.searchsorted` with datetimelike dtypes will now try to cast string arguments (listlike and scalar) to the matching datetimelike type (:issue:`36346`) - Added methods :meth:`IntegerArray.prod`, :meth:`IntegerArray.min`, and :meth:`IntegerArray.max` 
(:issue:`33790`) - Calling a NumPy ufunc on a ``DataFrame`` with extension types now preserves the extension types when possible (:issue:`23743`). - Calling a binary-input NumPy ufunc on multiple ``DataFrame`` objects now aligns, matching the behavior of binary operations and ufuncs on ``Series`` (:issue:`23743`). - Where possible :meth:`RangeIndex.difference` and :meth:`RangeIndex.symmetric_difference` will return :class:`RangeIndex` instead of :class:`Int64Index` (:issue:`36564`) - :meth:`DataFrame.to_parquet` now supports :class:`MultiIndex` for columns in parquet format (:issue:`34777`) -- Added :meth:`Rolling.sem()` and :meth:`Expanding.sem()` to compute the standard error of mean (:issue:`26476`). -- :meth:`Rolling.var()` and :meth:`Rolling.std()` use Kahan summation and Welfords Method to avoid numerical issues (:issue:`37051`) -- :meth:`DataFrame.corr` and :meth:`DataFrame.cov` use Welfords Method to avoid numerical issues (:issue:`37448`) +- Added :meth:`.Rolling.sem` and :meth:`Expanding.sem` to compute the standard error of the mean (:issue:`26476`) +- :meth:`.Rolling.var` and :meth:`.Rolling.std` use Kahan summation and Welford's Method to avoid numerical issues (:issue:`37051`) +- :meth:`DataFrame.corr` and :meth:`DataFrame.cov` use Welford's Method to avoid numerical issues (:issue:`37448`) - :meth:`DataFrame.plot` now recognizes ``xlabel`` and ``ylabel`` arguments for plots of type ``scatter`` and ``hexbin`` (:issue:`37001`) -- :class:`DataFrame` now supports ``divmod`` operation (:issue:`37165`) +- :class:`DataFrame` now supports the ``divmod`` operation (:issue:`37165`) - :meth:`DataFrame.to_parquet` now returns a ``bytes`` object when no ``path`` argument is passed (:issue:`37105`) -- :class:`Rolling` now supports the ``closed`` argument for fixed windows (:issue:`34315`) -- :class:`DatetimeIndex` and :class:`Series` with ``datetime64`` or ``datetime64tz`` dtypes now support ``std`` (:issue:`37436`) +- :class:`.Rolling` now supports the ``closed`` argument for fixed windows (:issue:`34315`) +- :class:`.DatetimeIndex` and :class:`Series` with ``datetime64`` or ``datetime64tz`` dtypes now support ``std`` (:issue:`37436`) - :class:`Window` now supports all Scipy window types in ``win_type`` with flexible keyword argument support (:issue:`34556`) - :meth:`testing.assert_index_equal` now has a ``check_order`` parameter that allows indexes to be checked in an order-insensitive manner (:issue:`37478`) - :func:`read_csv` supports memory-mapping for compressed files (:issue:`37621`) -- Improve error reporting for :meth:`DataFrame.merge()` when invalid merge column definitions were given (:issue:`16228`) -- Improve numerical stability for :meth:`Rolling.skew()`, :meth:`Rolling.kurt()`, :meth:`Expanding.skew()` and :meth:`Expanding.kurt()` through implementation of Kahan summation (:issue:`6929`) -- Improved error reporting for subsetting columns of a :class:`DataFrameGroupBy` with ``axis=1`` (:issue:`37725`) +- Improve error reporting for :meth:`DataFrame.merge` when invalid merge column definitions were given (:issue:`16228`) +- Improve numerical stability for :meth:`.Rolling.skew`, :meth:`.Rolling.kurt`, :meth:`Expanding.skew` and :meth:`Expanding.kurt` through implementation of Kahan summation (:issue:`6929`) +- Improved error reporting for subsetting columns of a :class:`.DataFrameGroupBy` with ``axis=1`` (:issue:`37725`) .. 
--------------------------------------------------------------------------- @@ -273,7 +272,7 @@ determines whether to exclude object-dtype columns on a column-by-column basis, instead of checking if *all* object-dtype columns can be considered boolean. This prevents pathological behavior where applying the reduction on a subset -of columns could result in a larger :class:`Series` result. See (:issue:`37799`). +of columns could result in a larger Series result. See (:issue:`37799`). .. ipython:: python @@ -305,7 +304,7 @@ of columns could result in a larger :class:`Series` result. See (:issue:`37799`) In [6]: df[["B", "C"]].all(bool_only=True) -Other :class:`DataFrame` reductions with ``numeric_only=None`` will also avoid +Other DataFrame reductions with ``numeric_only=None`` will also avoid this pathological behavior (:issue:`37827`): .. ipython:: python @@ -333,11 +332,11 @@ this pathological behavior (:issue:`37827`): df[["A"]].mean() -Moreover, :class:`DataFrame` reductions with ``numeric_only=None`` will now be -consistent with their :class:`Series` counterparts. In particular, for -reductions where the :class:`Series` method raises ``TypeError``, the -:class:`DataFrame` reduction will now consider that column non-numeric -instead of casting to NumPy which may have different semantics (:issue:`36076`, +Moreover, DataFrame reductions with ``numeric_only=None`` will now be +consistent with their Series counterparts. In particular, for +reductions where the Series method raises ``TypeError``, the +DataFrame reduction will now consider that column non-numeric +instead of casting to a NumPy array which may have different semantics (:issue:`36076`, :issue:`28949`, :issue:`21020`). .. ipython:: python @@ -448,11 +447,11 @@ See :ref:`install.dependencies` and :ref:`install.optional_dependencies` for mor Other API changes ^^^^^^^^^^^^^^^^^ -- Sorting in descending order is now stable for :meth:`Series.sort_values` and :meth:`Index.sort_values` for DateTime-like :class:`Index` subclasses. This will affect sort order when sorting :class:`DataFrame` on multiple columns, sorting with a key function that produces duplicates, or requesting the sorting index when using :meth:`Index.sort_values`. When using :meth:`Series.value_counts`, count of missing values is no longer the last in the list of duplicate counts, and its position corresponds to the position in the original :class:`Series`. When using :meth:`Index.sort_values` for DateTime-like :class:`Index` subclasses, NaTs ignored the ``na_position`` argument and were sorted to the beggining. Now they respect ``na_position``, the default being ``last``, same as other :class:`Index` subclasses. (:issue:`35992`) -- Passing an invalid ``fill_value`` to :meth:`Categorical.take`, :meth:`DatetimeArray.take`, :meth:`TimedeltaArray.take`, :meth:`PeriodArray.take` now raises ``TypeError`` instead of ``ValueError`` (:issue:`37733`) -- Passing an invalid ``fill_value`` to :meth:`Series.shift` with a ``CategoricalDtype`` now raises ``TypeError`` instead of ``ValueError`` (:issue:`37733`) +- Sorting in descending order is now stable for :meth:`Series.sort_values` and :meth:`Index.sort_values` for DateTime-like :class:`Index` subclasses. This will affect sort order when sorting a DataFrame on multiple columns, sorting with a key function that produces duplicates, or requesting the sorting index when using :meth:`Index.sort_values`. When using :meth:`Series.value_counts`, the count of missing values is no longer necessarily last in the list of duplicate counts. 
Instead, its position corresponds to the position in the original Series. When using :meth:`Index.sort_values` for DateTime-like :class:`Index` subclasses, NaTs ignored the ``na_position`` argument and were sorted to the beginning. Now they respect ``na_position``, the default being ``last``, same as other :class:`Index` subclasses. (:issue:`35992`) +- Passing an invalid ``fill_value`` to :meth:`Categorical.take`, :meth:`.DatetimeArray.take`, :meth:`TimedeltaArray.take`, or :meth:`PeriodArray.take` now raises a ``TypeError`` instead of a ``ValueError`` (:issue:`37733`) +- Passing an invalid ``fill_value`` to :meth:`Series.shift` with a ``CategoricalDtype`` now raises a ``TypeError`` instead of a ``ValueError`` (:issue:`37733`) - Passing an invalid value to :meth:`IntervalIndex.insert` or :meth:`CategoricalIndex.insert` now raises a ``TypeError`` instead of a ``ValueError`` (:issue:`37733`) -- Attempting to reindex a :class:`Series` with a :class:`CategoricalIndex` with an invalid ``fill_value`` now raises ``TypeError`` instead of ``ValueError`` (:issue:`37733`) +- Attempting to reindex a Series with a :class:`CategoricalIndex` with an invalid ``fill_value`` now raises a ``TypeError`` instead of a ``ValueError`` (:issue:`37733`) .. --------------------------------------------------------------------------- @@ -461,13 +460,13 @@ Other API changes Deprecations ~~~~~~~~~~~~ - Deprecated parameter ``inplace`` in :meth:`MultiIndex.set_codes` and :meth:`MultiIndex.set_levels` (:issue:`35626`) -- Deprecated parameter ``dtype`` in :meth:`~Index.copy` on method all index classes. Use the :meth:`~Index.astype` method instead for changing dtype (:issue:`35853`) -- Deprecated parameters ``levels`` and ``codes`` in :meth:`~MultiIndex.copy`. Use the :meth:`~MultiIndex.set_levels` and :meth:`~MultiIndex.set_codes` methods instead (:issue:`36685`) +- Deprecated parameter ``dtype`` of method :meth:`~Index.copy` for all :class:`Index` subclasses. Use the :meth:`~Index.astype` method instead for changing dtype (:issue:`35853`) +- Deprecated parameters ``levels`` and ``codes`` in :meth:`MultiIndex.copy`. Use the :meth:`~MultiIndex.set_levels` and :meth:`~MultiIndex.set_codes` methods instead (:issue:`36685`) - Date parser functions :func:`~pandas.io.date_converters.parse_date_time`, :func:`~pandas.io.date_converters.parse_date_fields`, :func:`~pandas.io.date_converters.parse_all_fields` and :func:`~pandas.io.date_converters.generic_parser` from ``pandas.io.date_converters`` are deprecated and will be removed in a future version; use :func:`to_datetime` instead (:issue:`35741`) - :meth:`DataFrame.lookup` is deprecated and will be removed in a future version, use :meth:`DataFrame.melt` and :meth:`DataFrame.loc` instead (:issue:`18682`) -- The :meth:`Index.to_native_types` is deprecated. Use ``.astype(str)`` instead (:issue:`28867`) +- The method :meth:`Index.to_native_types` is deprecated. 
Use ``.astype(str)`` instead (:issue:`28867`) - Deprecated indexing :class:`DataFrame` rows with datetime-like strings ``df[string]``, use ``df.loc[string]`` instead (:issue:`36179`) -- Deprecated casting an object-dtype index of ``datetime`` objects to :class:`DatetimeIndex` in the :class:`Series` constructor (:issue:`23598`) +- Deprecated casting an object-dtype index of ``datetime`` objects to :class:`.DatetimeIndex` in the :class:`Series` constructor (:issue:`23598`) - Deprecated :meth:`Index.is_all_dates` (:issue:`27744`) - The default value of ``regex`` for :meth:`Series.str.replace` will change from ``True`` to ``False`` in a future release. In addition, single character regular expressions will *not* be treated as literal strings when ``regex=True`` is set. (:issue:`24804`) - Deprecated automatic alignment on comparison operations between :class:`DataFrame` and :class:`Series`, do ``frame, ser = frame.align(ser, axis=1, copy=False)`` before e.g. ``frame == ser`` (:issue:`28759`) @@ -479,9 +478,9 @@ Deprecations - :class:`Index` methods ``&``, ``|``, and ``^`` behaving as the set operations :meth:`Index.intersection`, :meth:`Index.union`, and :meth:`Index.symmetric_difference`, respectively, are deprecated and in the future will behave as pointwise boolean operations matching :class:`Series` behavior. Use the named set methods instead (:issue:`36758`) - :meth:`Categorical.is_dtype_equal` and :meth:`CategoricalIndex.is_dtype_equal` are deprecated, will be removed in a future version (:issue:`37545`) - :meth:`Series.slice_shift` and :meth:`DataFrame.slice_shift` are deprecated, use :meth:`Series.shift` or :meth:`DataFrame.shift` instead (:issue:`37601`) -- Partial slicing on unordered :class:`DatetimeIndex` with keys, which are not in Index is deprecated and will be removed in a future version (:issue:`18531`) +- Partial slicing on unordered :class:`.DatetimeIndex` objects with keys that are not in the index is deprecated and will be removed in a future version (:issue:`18531`) - The ``how`` keyword in :meth:`PeriodIndex.astype` is deprecated and will be removed in a future version, use ``index.to_timestamp(how=how)`` instead (:issue:`37982`) -- Deprecated :meth:`Index.asi8` for :class:`Index` subclasses other than :class:`DatetimeIndex`, :class:`TimedeltaIndex`, and :class:`PeriodIndex` (:issue:`37877`) +- Deprecated :meth:`Index.asi8` for :class:`Index` subclasses other than :class:`.DatetimeIndex`, :class:`.TimedeltaIndex`, and :class:`PeriodIndex` (:issue:`37877`) - The ``inplace`` parameter of :meth:`Categorical.remove_unused_categories` is deprecated and will be removed in a future version (:issue:`37643`) .. 
--------------------------------------------------------------------------- @@ -493,22 +492,22 @@ Performance improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - Performance improvements when creating DataFrame or Series with dtype ``str`` or :class:`StringDtype` from array with many string elements (:issue:`36304`, :issue:`36317`, :issue:`36325`, :issue:`36432`, :issue:`37371`) -- Performance improvement in :meth:`GroupBy.agg` with the ``numba`` engine (:issue:`35759`) -- Performance improvements when creating :meth:`pd.Series.map` from a huge dictionary (:issue:`34717`) -- Performance improvement in :meth:`GroupBy.transform` with the ``numba`` engine (:issue:`36240`) -- ``Styler`` uuid method altered to compress data transmission over web whilst maintaining reasonably low table collision probability (:issue:`36345`) -- Performance improvement in :meth:`pd.to_datetime` with non-ns time unit for ``float`` ``dtype`` columns (:issue:`20445`) -- Performance improvement in setting values on a :class:`IntervalArray` (:issue:`36310`) +- Performance improvement in :meth:`.GroupBy.agg` with the ``numba`` engine (:issue:`35759`) +- Performance improvements when creating :meth:`Series.map` from a huge dictionary (:issue:`34717`) +- Performance improvement in :meth:`.GroupBy.transform` with the ``numba`` engine (:issue:`36240`) +- :class:`.Styler` uuid method altered to compress data transmission over web whilst maintaining reasonably low table collision probability (:issue:`36345`) +- Performance improvement in :func:`to_datetime` with non-ns time unit for ``float`` ``dtype`` columns (:issue:`20445`) +- Performance improvement in setting values on an :class:`IntervalArray` (:issue:`36310`) - The internal index method :meth:`~Index._shallow_copy` now makes the new index and original index share cached attributes, avoiding creating these again, if created on either. This can speed up operations that depend on creating copies of existing indexes (:issue:`36840`) -- Performance improvement in :meth:`RollingGroupby.count` (:issue:`35625`) -- Small performance decrease to :meth:`Rolling.min` and :meth:`Rolling.max` for fixed windows (:issue:`36567`) +- Performance improvement in :meth:`.RollingGroupby.count` (:issue:`35625`) +- Small performance decrease to :meth:`.Rolling.min` and :meth:`.Rolling.max` for fixed windows (:issue:`36567`) - Reduced peak memory usage in :meth:`DataFrame.to_pickle` when using ``protocol=5`` in python 3.8+ (:issue:`34244`) -- faster ``dir`` calls when many index labels, e.g. ``dir(ser)`` (:issue:`37450`) +- Faster ``dir`` calls when the object has many index labels, e.g. ``dir(ser)`` (:issue:`37450`) - Performance improvement in :class:`ExpandingGroupby` (:issue:`37064`) - Performance improvement in :meth:`Series.astype` and :meth:`DataFrame.astype` for :class:`Categorical` (:issue:`8628`) -- Performance improvement in :meth:`pd.DataFrame.groupby` for ``float`` ``dtype`` (:issue:`28303`), changes of the underlying hash-function can lead to changes in float based indexes sort ordering for ties (e.g. :meth:`pd.Index.value_counts`) -- Performance improvement in :meth:`pd.isin` for inputs with more than 1e6 elements +- Performance improvement in :meth:`DataFrame.groupby` for ``float`` ``dtype`` (:issue:`28303`), changes of the underlying hash-function can lead to changes in float based indexes sort ordering for ties (e.g. :meth:`Index.value_counts`) +- Performance improvement in :meth:`pd.isin` for inputs with more than 1e6 elements (:issue:`36611`) .. 
--------------------------------------------------------------------------- @@ -519,39 +518,39 @@ Bug fixes Categorical ^^^^^^^^^^^ -- :meth:`Categorical.fillna` will always return a copy, will validate a passed fill value regardless of whether there are any NAs to fill, and will disallow a ``NaT`` as a fill value for numeric categories (:issue:`36530`) +- :meth:`Categorical.fillna` will always return a copy, validate a passed fill value regardless of whether there are any NAs to fill, and disallow an ``NaT`` as a fill value for numeric categories (:issue:`36530`) - Bug in :meth:`Categorical.__setitem__` that incorrectly raised when trying to set a tuple value (:issue:`20439`) - Bug in :meth:`CategoricalIndex.equals` incorrectly casting non-category entries to ``np.nan`` (:issue:`37667`) -- Bug in :meth:`CatgoricalIndex.where` incorrectly setting non-category entries to ``np.nan`` instead of raising ``TypeError`` (:issue:`37977`) +- Bug in :meth:`CategoricalIndex.where` incorrectly setting non-category entries to ``np.nan`` instead of raising ``TypeError`` (:issue:`37977`) - Datetimelike ^^^^^^^^^^^^ -- Bug in :attr:`DatetimeArray.date` where a ``ValueError`` would be raised with a read-only backing array (:issue:`33530`) +- Bug in :attr:`.DatetimeArray.date` where a ``ValueError`` would be raised with a read-only backing array (:issue:`33530`) - Bug in ``NaT`` comparisons failing to raise ``TypeError`` on invalid inequality comparisons (:issue:`35046`) -- Bug in :class:`DateOffset` where attributes reconstructed from pickle files differ from original objects when input values exceed normal ranges (e.g months=12) (:issue:`34511`) -- Bug in :meth:`DatetimeIndex.get_slice_bound` where ``datetime.date`` objects were not accepted or naive :class:`Timestamp` with a tz-aware :class:`DatetimeIndex` (:issue:`35690`) -- Bug in :meth:`DatetimeIndex.slice_locs` where ``datetime.date`` objects were not accepted (:issue:`34077`) -- Bug in :meth:`DatetimeIndex.searchsorted`, :meth:`TimedeltaIndex.searchsorted`, :meth:`PeriodIndex.searchsorted`, and :meth:`Series.searchsorted` with ``datetime64``, ``timedelta64`` or ``Period`` dtype placement of ``NaT`` values being inconsistent with ``NumPy`` (:issue:`36176`, :issue:`36254`) -- Inconsistency in :class:`DatetimeArray`, :class:`TimedeltaArray`, and :class:`PeriodArray` setitem casting arrays of strings to datetimelike scalars but not scalar strings (:issue:`36261`) -- Bug in :meth:`DatetimeArray.take` incorrectly allowing ``fill_value`` with a mismatched timezone (:issue:`37356`) -- Bug in :class:`DatetimeIndex.shift` incorrectly raising when shifting empty indexes (:issue:`14811`) -- :class:`Timestamp` and :class:`DatetimeIndex` comparisons between timezone-aware and timezone-naive objects now follow the standard library ``datetime`` behavior, returning ``True``/``False`` for ``!=``/``==`` and raising for inequality comparisons (:issue:`28507`) -- Bug in :meth:`DatetimeIndex.equals` and :meth:`TimedeltaIndex.equals` incorrectly considering ``int64`` indexes as equal (:issue:`36744`) -- :meth:`to_json` and :meth:`read_json` now implements timezones parsing when orient structure is 'table'. -- :meth:`astype` now attempts to convert to 'datetime64[ns, tz]' directly from 'object' with inferred timezone from string (:issue:`35973`). 
-- Bug in :meth:`TimedeltaIndex.sum` and :meth:`Series.sum` with ``timedelta64`` dtype on an empty index or series returning ``NaT`` instead of ``Timedelta(0)`` (:issue:`31751`) -- Bug in :meth:`DatetimeArray.shift` incorrectly allowing ``fill_value`` with a mismatched timezone (:issue:`37299`) -- Bug in adding a :class:`BusinessDay` with nonzero ``offset`` to a non-scalar other (:issue:`37457`) +- Bug in :class:`.DateOffset` where attributes reconstructed from pickle files differ from original objects when input values exceed normal ranges (e.g months=12) (:issue:`34511`) +- Bug in :meth:`.DatetimeIndex.get_slice_bound` where ``datetime.date`` objects were not accepted or naive :class:`Timestamp` with a tz-aware :class:`.DatetimeIndex` (:issue:`35690`) +- Bug in :meth:`.DatetimeIndex.slice_locs` where ``datetime.date`` objects were not accepted (:issue:`34077`) +- Bug in :meth:`.DatetimeIndex.searchsorted`, :meth:`.TimedeltaIndex.searchsorted`, :meth:`PeriodIndex.searchsorted`, and :meth:`Series.searchsorted` with ``datetime64``, ``timedelta64`` or :class:`Period` dtype placement of ``NaT`` values being inconsistent with NumPy (:issue:`36176`, :issue:`36254`) +- Inconsistency in :class:`.DatetimeArray`, :class:`.TimedeltaArray`, and :class:`.PeriodArray` method ``__setitem__`` casting arrays of strings to datetimelike scalars but not scalar strings (:issue:`36261`) +- Bug in :meth:`.DatetimeArray.take` incorrectly allowing ``fill_value`` with a mismatched timezone (:issue:`37356`) +- Bug in :class:`.DatetimeIndex.shift` incorrectly raising when shifting empty indexes (:issue:`14811`) +- :class:`Timestamp` and :class:`.DatetimeIndex` comparisons between timezone-aware and timezone-naive objects now follow the standard library ``datetime`` behavior, returning ``True``/``False`` for ``!=``/``==`` and raising for inequality comparisons (:issue:`28507`) +- Bug in :meth:`.DatetimeIndex.equals` and :meth:`.TimedeltaIndex.equals` incorrectly considering ``int64`` indexes as equal (:issue:`36744`) +- :meth:`Series.to_json`, :meth:`DataFrame.to_json`, and :meth:`read_json` now implement timezone parsing when orient structure is ``table`` (:issue:`35973`) +- :meth:`astype` now attempts to convert to ``datetime64[ns, tz]`` directly from ``object`` with inferred timezone from string (:issue:`35973`) +- Bug in :meth:`.TimedeltaIndex.sum` and :meth:`Series.sum` with ``timedelta64`` dtype on an empty index or series returning ``NaT`` instead of ``Timedelta(0)`` (:issue:`31751`) +- Bug in :meth:`.DatetimeArray.shift` incorrectly allowing ``fill_value`` with a mismatched timezone (:issue:`37299`) +- Bug in adding a :class:`.BusinessDay` with nonzero ``offset`` to a non-scalar other (:issue:`37457`) - Bug in :func:`to_datetime` with a read-only array incorrectly raising (:issue:`34857`) -- Bug in :meth:`Series.isin` with ``datetime64[ns]`` dtype and :meth:`DatetimeIndex.isin` incorrectly casting integers to datetimes (:issue:`36621`) -- Bug in :meth:`Series.isin` with ``datetime64[ns]`` dtype and :meth:`DatetimeIndex.isin` failing to consider timezone-aware and timezone-naive datetimes as always different (:issue:`35728`) +- Bug in :meth:`Series.isin` with ``datetime64[ns]`` dtype and :meth:`.DatetimeIndex.isin` incorrectly casting integers to datetimes (:issue:`36621`) +- Bug in :meth:`Series.isin` with ``datetime64[ns]`` dtype and :meth:`.DatetimeIndex.isin` failing to consider timezone-aware and timezone-naive datetimes as always different (:issue:`35728`) - Bug in :meth:`Series.isin` with 
``PeriodDtype`` dtype and :meth:`PeriodIndex.isin` failing to consider arguments with different ``PeriodDtype`` as always different (:issue:`37528`) Timedelta ^^^^^^^^^ -- Bug in :class:`TimedeltaIndex`, :class:`Series`, and :class:`DataFrame` floor-division with ``timedelta64`` dtypes and ``NaT`` in the denominator (:issue:`35529`) -- Bug in parsing of ISO 8601 durations in :class:`Timedelta`, :meth:`pd.to_datetime` (:issue:`37159`, fixes :issue:`29773` and :issue:`36204`) +- Bug in :class:`.TimedeltaIndex`, :class:`Series`, and :class:`DataFrame` floor-division with ``timedelta64`` dtypes and ``NaT`` in the denominator (:issue:`35529`) +- Bug in parsing of ISO 8601 durations in :class:`Timedelta` and :func:`to_datetime` (:issue:`29773`, :issue:`36204`) - Bug in :func:`to_timedelta` with a read-only array incorrectly raising (:issue:`34857`) - Bug in :class:`Timedelta` incorrectly truncating to sub-second portion of a string input when it has precision higher than nanoseconds (:issue:`36738`) @@ -567,17 +566,17 @@ Numeric - Bug in :func:`to_numeric` where float precision was incorrect (:issue:`31364`) - Bug in :meth:`DataFrame.any` with ``axis=1`` and ``bool_only=True`` ignoring the ``bool_only`` keyword (:issue:`32432`) - Bug in :meth:`Series.equals` where a ``ValueError`` was raised when numpy arrays were compared to scalars (:issue:`35267`) -- Bug in :class:`Series` where two :class:`Series` each have a :class:`DatetimeIndex` with different timezones having those indexes incorrectly changed when performing arithmetic operations (:issue:`33671`) -- Bug in :meth:`pd._testing.assert_almost_equal` was incorrect for complex numeric types (:issue:`28235`) +- Bug in :class:`Series` where two Series each have a :class:`.DatetimeIndex` with different timezones having those indexes incorrectly changed when performing arithmetic operations (:issue:`33671`) +- Bug in :mod:`pandas.testing` module functions when used with ``check_exact=False`` on complex numeric types (:issue:`28235`) - Bug in :meth:`DataFrame.__rmatmul__` error handling reporting transposed shapes (:issue:`21581`) - Bug in :class:`Series` flex arithmetic methods where the result when operating with a ``list``, ``tuple`` or ``np.ndarray`` would have an incorrect name (:issue:`36760`) -- Bug in :class:`IntegerArray` multiplication with ``timedelta`` and ``np.timedelta64`` objects (:issue:`36870`) +- Bug in :class:`.IntegerArray` multiplication with ``timedelta`` and ``np.timedelta64`` objects (:issue:`36870`) - Bug in :class:`MultiIndex` comparison with tuple incorrectly treating tuple as array-like (:issue:`21517`) - Bug in :meth:`DataFrame.diff` with ``datetime64`` dtypes including ``NaT`` values failing to fill ``NaT`` results correctly (:issue:`32441`) - Bug in :class:`DataFrame` arithmetic ops incorrectly accepting keyword arguments (:issue:`36843`) -- Bug in :class:`IntervalArray` comparisons with :class:`Series` not returning :class:`Series` (:issue:`36908`) +- Bug in :class:`.IntervalArray` comparisons with :class:`Series` not returning Series (:issue:`36908`) - Bug in :class:`DataFrame` allowing arithmetic operations with list of array-likes with undefined results. 
Behavior changed to raising ``ValueError`` (:issue:`36702`) -- Bug in :meth:`DataFrame.std`` with ``timedelta64`` dtype and ``skipna=False`` (:issue:`37392`) +- Bug in :meth:`DataFrame.std` with ``timedelta64`` dtype and ``skipna=False`` (:issue:`37392`) - Bug in :meth:`DataFrame.min` and :meth:`DataFrame.max` with ``datetime64`` dtype and ``skipna=False`` (:issue:`36907`) Conversion @@ -589,7 +588,7 @@ Conversion Strings ^^^^^^^ - Bug in :meth:`Series.to_string`, :meth:`DataFrame.to_string`, and :meth:`DataFrame.to_latex` adding a leading space when ``index=False`` (:issue:`24980`) -- Bug in :func:`to_numeric` raising a ``TypeError`` when attempting to convert a string dtype :class:`Series` containing only numeric strings and ``NA`` (:issue:`37262`) +- Bug in :func:`to_numeric` raising a ``TypeError`` when attempting to convert a string dtype Series containing only numeric strings and ``NA`` (:issue:`37262`) - Interval @@ -604,22 +603,22 @@ Interval Indexing ^^^^^^^^ -- Bug in :meth:`PeriodIndex.get_loc` incorrectly raising ``ValueError`` on non-datelike strings instead of ``KeyError``, causing similar errors in :meth:`Series.__geitem__`, :meth:`Series.__contains__`, and :meth:`Series.loc.__getitem__` (:issue:`34240`) +- Bug in :meth:`PeriodIndex.get_loc` incorrectly raising ``ValueError`` on non-datelike strings instead of ``KeyError``, causing similar errors in :meth:`Series.__getitem__`, :meth:`Series.__contains__`, and :meth:`Series.loc.__getitem__` (:issue:`34240`) - Bug in :meth:`Index.sort_values` where, when empty values were passed, the method would break by trying to compare missing values instead of pushing them to the end of the sort order. (:issue:`35584`) -- Bug in :meth:`Index.get_indexer` and :meth:`Index.get_indexer_non_unique` where int64 arrays are returned instead of intp. (:issue:`36359`) +- Bug in :meth:`Index.get_indexer` and :meth:`Index.get_indexer_non_unique` where ``int64`` arrays are returned instead of ``intp``. (:issue:`36359`) - Bug in :meth:`DataFrame.sort_index` where parameter ascending passed as a list on a single level index gives wrong result. 
(:issue:`32334`) - Bug in :meth:`DataFrame.reset_index` was incorrectly raising a ``ValueError`` for input with a :class:`MultiIndex` with missing values in a level with ``Categorical`` dtype (:issue:`24206`) - Bug in indexing with boolean masks on datetime-like values sometimes returning a view instead of a copy (:issue:`36210`) - Bug in :meth:`DataFrame.__getitem__` and :meth:`DataFrame.loc.__getitem__` with :class:`IntervalIndex` columns and a numeric indexer (:issue:`26490`) - Bug in :meth:`Series.loc.__getitem__` with a non-unique :class:`MultiIndex` and an empty-list indexer (:issue:`13691`) -- Bug in indexing on a :class:`Series` or :class:`DataFrame` with a :class:`MultiIndex` with a level named "0" (:issue:`37194`) +- Bug in indexing on a :class:`Series` or :class:`DataFrame` with a :class:`MultiIndex` and a level named ``"0"`` (:issue:`37194`) - Bug in :meth:`Series.__getitem__` when using an unsigned integer array as an indexer giving incorrect results or segfaulting instead of raising ``KeyError`` (:issue:`37218`) - Bug in :meth:`Index.where` incorrectly casting numeric values to strings (:issue:`37591`) -- Bug in :meth:`Series.loc` and :meth:`DataFrame.loc` raises when numeric label was given for object :class:`Index` although label was in :class:`Index` (:issue:`26491`) -- Bug in :meth:`DataFrame.loc` returned requested key plus missing values when ``loc`` was applied to single level from :class:`MultiIndex` (:issue:`27104`) +- Bug in :meth:`Series.loc` and :meth:`DataFrame.loc` raises when the index was of ``object`` dtype and the given numeric label was in the index (:issue:`26491`) +- Bug in :meth:`DataFrame.loc` returned requested key plus missing values when ``loc`` was applied to single level from a :class:`MultiIndex` (:issue:`27104`) - Bug in indexing on a :class:`Series` or :class:`DataFrame` with a :class:`CategoricalIndex` using a listlike indexer containing NA values (:issue:`37722`) - Bug in :meth:`DataFrame.xs` ignored ``droplevel=False`` for columns (:issue:`19056`) -- Bug in :meth:`DataFrame.reindex` raising ``IndexingError`` wrongly for empty :class:`DataFrame` with ``tolerance`` not None or ``method="nearest"`` (:issue:`27315`) +- Bug in :meth:`DataFrame.reindex` raising ``IndexingError`` wrongly for empty DataFrame with ``tolerance`` not None or ``method="nearest"`` (:issue:`27315`) - Bug in indexing on a :class:`Series` or :class:`DataFrame` with a :class:`CategoricalIndex` using listlike indexer that contains elements that are in the index's ``categories`` but not in the index itself failing to raise ``KeyError`` (:issue:`37901`) - Bug in :meth:`DataFrame.iloc` and :meth:`Series.iloc` aligning objects in ``__setitem__`` (:issue:`22046`) - Bug in :meth:`DataFrame.loc` did not raise ``KeyError`` when missing combination was given with ``slice(None)`` for remaining levels (:issue:`19556`) @@ -630,7 +629,8 @@ Indexing Missing ^^^^^^^ -- Bug in :meth:`SeriesGroupBy.transform` now correctly handles missing values for ``dropna=False`` (:issue:`35014`) +- Bug in :meth:`.SeriesGroupBy.transform` now correctly handles missing values for ``dropna=False`` (:issue:`35014`) +- Bug in :meth:`Series.nunique` with ``dropna=True`` was returning incorrect results when both ``NA`` and ``None`` missing values were present (:issue:`37566`) - MultiIndex @@ -645,29 +645,29 @@ I/O ^^^ - :func:`read_sas` no longer leaks resources on failure (:issue:`35566`) -- Bug in :meth:`to_csv` caused a ``ValueError`` when it was called with a filename in combination with ``mode`` containing a 
``b`` (:issue:`35058`) -- In :meth:`read_csv` ``float_precision='round_trip'`` now handles ``decimal`` and ``thousands`` parameters (:issue:`35365`) +- Bug in :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` caused a ``ValueError`` when it was called with a filename in combination with ``mode`` containing a ``b`` (:issue:`35058`) +- Bug in :meth:`read_csv` with ``float_precision='round_trip'`` did not handle ``decimal`` and ``thousands`` parameters (:issue:`35365`) - :meth:`to_pickle` and :meth:`read_pickle` were closing user-provided file objects (:issue:`35679`) - :meth:`to_csv` passes compression arguments for ``'gzip'`` always to ``gzip.GzipFile`` (:issue:`28103`) - :meth:`to_csv` did not support zip compression for binary file object not having a filename (:issue:`35058`) -- :meth:`to_csv` and :meth:`read_csv` did not honor ``compression`` and ``encoding`` for path-like objects that are internally converted to file-like objects (:issue:`35677`, :issue:`26124`, and :issue:`32392`) -- :meth:`to_picke` and :meth:`read_pickle` did not support compression for file-objects (:issue:`26237`, :issue:`29054`, and :issue:`29570`) +- :meth:`to_csv` and :meth:`read_csv` did not honor ``compression`` and ``encoding`` for path-like objects that are internally converted to file-like objects (:issue:`35677`, :issue:`26124`, :issue:`32392`) +- :meth:`DataFrame.to_pickle`, :meth:`Series.to_pickle`, and :meth:`read_pickle` did not support compression for file-objects (:issue:`26237`, :issue:`29054`, :issue:`29570`) - Bug in :func:`LongTableBuilder.middle_separator` was duplicating LaTeX longtable entries in the List of Tables of a LaTeX document (:issue:`34360`) - Bug in :meth:`read_csv` with ``engine='python'`` truncating data if multiple items present in first row and first element started with BOM (:issue:`36343`) - Removed ``private_key`` and ``verbose`` from :func:`read_gbq` as they are no longer supported in ``pandas-gbq`` (:issue:`34654`, :issue:`30200`) - Bumped minimum pytables version to 3.5.1 to avoid a ``ValueError`` in :meth:`read_hdf` (:issue:`24839`) - Bug in :func:`read_table` and :func:`read_csv` when ``delim_whitespace=True`` and ``sep=default`` (:issue:`36583`) -- Bug in :meth:`to_json` with ``lines=True`` and ``orient='records'`` the last line of the record is not appended with 'new line character' (:issue:`36888`) +- Bug in :meth:`DataFrame.to_json` and :meth:`Series.to_json` when used with ``lines=True`` and ``orient='records'`` the last line of the record is not appended with 'new line character' (:issue:`36888`) - Bug in :meth:`read_parquet` with fixed offset timezones. 
String representation of timezones was not recognized (:issue:`35997`, :issue:`36004`) - Bug in :meth:`DataFrame.to_html`, :meth:`DataFrame.to_string`, and :meth:`DataFrame.to_latex` ignoring the ``na_rep`` argument when ``float_format`` was also specified (:issue:`9046`, :issue:`13828`) - Bug in output rendering of complex numbers showing too many trailing zeros (:issue:`36799`) -- Bug in :class:`HDFStore` threw a ``TypeError`` when exporting an empty :class:`DataFrame` with ``datetime64[ns, tz]`` dtypes with a fixed HDF5 store (:issue:`20594`) -- Bug in :class:`HDFStore` was dropping timezone information when exporting :class:`Series` with ``datetime64[ns, tz]`` dtypes with a fixed HDF5 store (:issue:`20594`) +- Bug in :class:`HDFStore` threw a ``TypeError`` when exporting an empty DataFrame with ``datetime64[ns, tz]`` dtypes with a fixed HDF5 store (:issue:`20594`) +- Bug in :class:`HDFStore` was dropping timezone information when exporting a Series with ``datetime64[ns, tz]`` dtypes with a fixed HDF5 store (:issue:`20594`) - :func:`read_csv` was closing user-provided binary file handles when ``engine="c"`` and an ``encoding`` was requested (:issue:`36980`) - Bug in :meth:`DataFrame.to_hdf` was not dropping missing rows with ``dropna=True`` (:issue:`35719`) - Bug in :func:`read_html` was raising a ``TypeError`` when supplying a ``pathlib.Path`` argument to the ``io`` parameter (:issue:`37705`) -- :meth:`to_excel` and :meth:`to_markdown` support writing to fsspec URLs such as S3 and Google Cloud Storage (:issue:`33987`) -- Bug in :meth:`read_fw` was not skipping blank lines (even with ``skip_blank_lines=True``) (:issue:`37758`) +- :meth:`DataFrame.to_excel`, :meth:`Series.to_excel`, :meth:`DataFrame.to_markdown`, and :meth:`Series.to_markdown` now support writing to fsspec URLs such as S3 and Google Cloud Storage (:issue:`33987`) +- Bug in :func:`read_fwf` with ``skip_blank_lines=True`` was not skipping blank lines (:issue:`37758`) - Parse missing values using :func:`read_json` with ``dtype=False`` to ``NaN`` instead of ``None`` (:issue:`28501`) - :meth:`read_fwf` was inferring compression with ``compression=None`` which was not consistent with the other :meth:``read_*`` functions (:issue:`37909`) - :meth:`DataFrame.to_html` was ignoring ``formatters`` argument for ``ExtensionDtype`` columns (:issue:`36525`) @@ -682,54 +682,55 @@ Plotting ^^^^^^^^ - Bug in :meth:`DataFrame.plot` was rotating xticklabels when ``subplots=True``, even if the x-axis wasn't an irregular time series (:issue:`29460`) -- Bug in :meth:`DataFrame.plot` where a marker letter in the ``style`` keyword sometimes causes a ``ValueError`` (:issue:`21003`) -- Bug in :func:`DataFrame.plot.bar` and :func:`Series.plot.bar`. Ticks position were assigned by value order instead of using the actual value for numeric, or a smart ordering for string. 
(:issue:`26186` and :issue:`11465`) +- Bug in :meth:`DataFrame.plot` where a marker letter in the ``style`` keyword sometimes caused a ``ValueError`` (:issue:`21003`) +- Bug in :meth:`DataFrame.plot.bar` and :meth:`Series.plot.bar` where ticks positions were assigned by value order instead of using the actual value for numeric or a smart ordering for string (:issue:`26186`, :issue:`11465`) - Twinned axes were losing their tick labels which should only happen to all but the last row or column of 'externally' shared axes (:issue:`33819`) -- Bug in :meth:`Series.plot` and :meth:`DataFrame.plot` was throwing :exc:`ValueError` with a :class:`Series` or :class:`DataFrame` - indexed by a :class:`TimedeltaIndex` with a fixed frequency when x-axis lower limit was greater than upper limit (:issue:`37454`) -- Bug in :meth:`DataFrameGroupBy.boxplot` when ``subplots=False``, a KeyError would raise (:issue:`16748`) -- Bug in :meth:`DataFrame.plot` and :meth:`Series.plot` was overwriting matplotlib's shared y axes behaviour when no sharey parameter was passed (:issue:`37942`) +- Bug in :meth:`Series.plot` and :meth:`DataFrame.plot` was throwing a :exc:`ValueError` when the Series or DataFrame was + indexed by a :class:`.TimedeltaIndex` with a fixed frequency and the x-axis lower limit was greater than the upper limit (:issue:`37454`) +- Bug in :meth:`.DataFrameGroupBy.boxplot` when ``subplots=False`` would raise a ``KeyError`` (:issue:`16748`) +- Bug in :meth:`DataFrame.plot` and :meth:`Series.plot` was overwriting matplotlib's shared y axes behaviour when no ``sharey`` parameter was passed (:issue:`37942`) + Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ -- Bug in :meth:`DataFrameGroupBy.count` and :meth:`SeriesGroupBy.sum` returning ``NaN`` for missing categories when grouped on multiple ``Categoricals``. Now returning ``0`` (:issue:`35028`) -- Bug in :meth:`DataFrameGroupBy.apply` that would some times throw an erroneous ``ValueError`` if the grouping axis had duplicate entries (:issue:`16646`) -- Bug in :meth:`DataFrame.resample(...)` that would throw a ``ValueError`` when resampling from "D" to "24H" over a transition into daylight savings time (DST) (:issue:`35219`) -- Bug when combining methods :meth:`DataFrame.groupby` with :meth:`DataFrame.resample` and :meth:`DataFrame.interpolate` raising an ``TypeError`` (:issue:`35325`) -- Bug in :meth:`DataFrameGroupBy.apply` where a non-nuisance grouping column would be dropped from the output columns if another groupby method was called before ``.apply()`` (:issue:`34656`) +- Bug in :meth:`.DataFrameGroupBy.count` and :meth:`SeriesGroupBy.sum` returning ``NaN`` for missing categories when grouped on multiple ``Categoricals``. Now returning ``0`` (:issue:`35028`) +- Bug in :meth:`.DataFrameGroupBy.apply` that would sometimes throw an erroneous ``ValueError`` if the grouping axis had duplicate entries (:issue:`16646`) +- Bug in :meth:`DataFrame.resample` that would throw a ``ValueError`` when resampling from ``"D"`` to ``"24H"`` over a transition into daylight savings time (DST) (:issue:`35219`) +- Bug when combining methods :meth:`DataFrame.groupby` with :meth:`DataFrame.resample` and :meth:`DataFrame.interpolate` raising a ``TypeError`` (:issue:`35325`) +- Bug in :meth:`.DataFrameGroupBy.apply` where a non-nuisance grouping column would be dropped from the output columns if another groupby method was called before ``.apply`` (:issue:`34656`) - Bug when subsetting columns on a :class:`~pandas.core.groupby.DataFrameGroupBy` (e.g. 
``df.groupby('a')[['b']])``) would reset the attributes ``axis``, ``dropna``, ``group_keys``, ``level``, ``mutated``, ``sort``, and ``squeeze`` to their default values. (:issue:`9959`) -- Bug in :meth:`DataFrameGroupby.tshift` failing to raise ``ValueError`` when a frequency cannot be inferred for the index of a group (:issue:`35937`) +- Bug in :meth:`.DataFrameGroupBy.tshift` failing to raise ``ValueError`` when a frequency cannot be inferred for the index of a group (:issue:`35937`) - Bug in :meth:`DataFrame.groupby` does not always maintain column index name for ``any``, ``all``, ``bfill``, ``ffill``, ``shift`` (:issue:`29764`) -- Bug in :meth:`DataFrameGroupBy.apply` raising error with ``np.nan`` group(s) when ``dropna=False`` (:issue:`35889`) -- Bug in :meth:`Rolling.sum()` returned wrong values when dtypes where mixed between float and integer and axis was equal to one (:issue:`20649`, :issue:`35596`) -- Bug in :meth:`Rolling.count` returned ``np.nan`` with :class:`pandas.api.indexers.FixedForwardWindowIndexer` as window, ``min_periods=0`` and only missing values in window (:issue:`35579`) +- Bug in :meth:`.DataFrameGroupBy.apply` raising error with ``np.nan`` group(s) when ``dropna=False`` (:issue:`35889`) +- Bug in :meth:`.Rolling.sum` returned wrong values when dtypes where mixed between float and integer and ``axis=1`` (:issue:`20649`, :issue:`35596`) +- Bug in :meth:`.Rolling.count` returned ``np.nan`` with :class:`~pandas.api.indexers.FixedForwardWindowIndexer` as window, ``min_periods=0`` and only missing values in the window (:issue:`35579`) - Bug where :class:`pandas.core.window.Rolling` produces incorrect window sizes when using a ``PeriodIndex`` (:issue:`34225`) -- Bug in :meth:`DataFrameGroupBy.ffill` and :meth:`DataFrameGroupBy.bfill` where a ``NaN`` group would return filled values instead of ``NaN`` when ``dropna=True`` (:issue:`34725`) -- Bug in :meth:`RollingGroupby.count` where a ``ValueError`` was raised when specifying the ``closed`` parameter (:issue:`35869`) -- Bug in :meth:`DataFrame.groupby.rolling` returning wrong values with partial centered window (:issue:`36040`). -- Bug in :meth:`DataFrameGroupBy.rolling` returned wrong values with timeaware window containing ``NaN``. 
Raises ``ValueError`` because windows are not monotonic now (:issue:`34617`) -- Bug in :meth:`Rolling.__iter__` where a ``ValueError`` was not raised when ``min_periods`` was larger than ``window`` (:issue:`37156`) -- Using :meth:`Rolling.var()` instead of :meth:`Rolling.std()` avoids numerical issues for :meth:`Rolling.corr()` when :meth:`Rolling.var()` is still within floating point precision while :meth:`Rolling.std()` is not (:issue:`31286`) -- Bug in :meth:`df.groupby(..).quantile() <pandas.core.groupby.DataFrameGroupBy.quantile>` and :meth:`df.resample(..).quantile() <pandas.core.resample.Resampler.quantile>` raised ``TypeError`` when values were of type ``Timedelta`` (:issue:`29485`) -- Bug in :meth:`Rolling.median` and :meth:`Rolling.quantile` returned wrong values for :class:`BaseIndexer` subclasses with non-monotonic starting or ending points for windows (:issue:`37153`) +- Bug in :meth:`.DataFrameGroupBy.ffill` and :meth:`.DataFrameGroupBy.bfill` where a ``NaN`` group would return filled values instead of ``NaN`` when ``dropna=True`` (:issue:`34725`) +- Bug in :meth:`.RollingGroupby.count` where a ``ValueError`` was raised when specifying the ``closed`` parameter (:issue:`35869`) +- Bug in :meth:`.DataFrameGroupBy.rolling` returning wrong values with partial centered window (:issue:`36040`) +- Bug in :meth:`.DataFrameGroupBy.rolling` returned wrong values with timeaware window containing ``NaN``. Raises ``ValueError`` because windows are not monotonic now (:issue:`34617`) +- Bug in :meth:`.Rolling.__iter__` where a ``ValueError`` was not raised when ``min_periods`` was larger than ``window`` (:issue:`37156`) +- Using :meth:`.Rolling.var` instead of :meth:`.Rolling.std` avoids numerical issues for :meth:`.Rolling.corr` when :meth:`.Rolling.var` is still within floating point precision while :meth:`.Rolling.std` is not (:issue:`31286`) +- Bug in :meth:`.DataFrameGroupBy.quantile` and :meth:`.Resampler.quantile` raised ``TypeError`` when values were of type ``Timedelta`` (:issue:`29485`) +- Bug in :meth:`.Rolling.median` and :meth:`.Rolling.quantile` returned wrong values for :class:`.BaseIndexer` subclasses with non-monotonic starting or ending points for windows (:issue:`37153`) - Bug in :meth:`DataFrame.groupby` dropped ``nan`` groups from result with ``dropna=False`` when grouping over a single column (:issue:`35646`, :issue:`35542`) -- Bug in :meth:`DataFrameGroupBy.head`, :meth:`DataFrameGroupBy.tail`, :meth:`SeriesGroupBy.head`, and :meth:`SeriesGroupBy.tail` would raise when used with ``axis=1`` (:issue:`9772`) -- Bug in :meth:`DataFrameGroupBy.transform` would raise when used with ``axis=1`` and a transformation kernel (e.g. "shift") (:issue:`36308`) +- Bug in :meth:`.DataFrameGroupBy.head`, :meth:`.DataFrameGroupBy.tail`, :meth:`SeriesGroupBy.head`, and :meth:`SeriesGroupBy.tail` would raise when used with ``axis=1`` (:issue:`9772`) +- Bug in :meth:`.DataFrameGroupBy.transform` would raise when used with ``axis=1`` and a transformation kernel (e.g. "shift") (:issue:`36308`) Reshaping ^^^^^^^^^ - Bug in :meth:`DataFrame.pivot_table` with ``aggfunc='count'`` or ``aggfunc='sum'`` returning ``NaN`` for missing categories when pivoted on a ``Categorical``. Now returning ``0`` (:issue:`31422`) -- Bug in :func:`union_indexes` where input index names are not preserved in some cases. 
Affects :func:`concat` and :class:`DataFrame` constructor (:issue:`13475`) +- Bug in :func:`concat` and :class:`DataFrame` constructor where input index names are not preserved in some cases (:issue:`13475`) - Bug in func :meth:`crosstab` when using multiple columns with ``margins=True`` and ``normalize=True`` (:issue:`35144`) - Bug in :meth:`DataFrame.agg` with ``func={'name':<FUNC>}`` incorrectly raising ``TypeError`` when ``DataFrame.columns==['Name']`` (:issue:`36212`) -- Bug in :meth:`Series.transform` would give incorrect results or raise when the argument ``func`` was dictionary (:issue:`35811`) -- Bug in :meth:`DataFrame.pivot` did not preserve :class:`MultiIndex` level names for columns when rows and columns both multiindexed (:issue:`36360`) +- Bug in :meth:`Series.transform` would give incorrect results or raise when the argument ``func`` was a dictionary (:issue:`35811`) +- Bug in :meth:`DataFrame.pivot` did not preserve :class:`MultiIndex` level names for columns when rows and columns are both multiindexed (:issue:`36360`) - Bug in :meth:`DataFrame.pivot` modified ``index`` argument when ``columns`` was passed but ``values`` was not (:issue:`37635`) -- Bug in :func:`join` returned a non deterministic level-order for the resulting :class:`MultiIndex` (:issue:`36910`) -- Bug in :meth:`DataFrame.combine_first()` caused wrong alignment with dtype ``string`` and one level of ``MultiIndex`` containing only ``NA`` (:issue:`37591`) -- Fixed regression in :func:`merge` on merging DatetimeIndex with empty DataFrame (:issue:`36895`) +- Bug in :meth:`DataFrame.join` returned a non deterministic level-order for the resulting :class:`MultiIndex` (:issue:`36910`) +- Bug in :meth:`DataFrame.combine_first` caused wrong alignment with dtype ``string`` and one level of ``MultiIndex`` containing only ``NA`` (:issue:`37591`) +- Fixed regression in :func:`merge` on merging :class:`.DatetimeIndex` with empty DataFrame (:issue:`36895`) - Bug in :meth:`DataFrame.apply` not setting index of return value when ``func`` return type is ``dict`` (:issue:`37544`) - Bug in :func:`concat` resulting in a ``ValueError`` when at least one of both inputs had a non-unique index (:issue:`36263`) - Bug in :meth:`DataFrame.merge` and :meth:`pandas.merge` returning inconsistent ordering in result for ``how=right`` and ``how=left`` (:issue:`35382`) @@ -743,26 +744,25 @@ Sparse ExtensionArray ^^^^^^^^^^^^^^ -- Fixed Bug where :class:`DataFrame` column set to scalar extension type via a dict instantion was considered an object type rather than the extension type (:issue:`35965`) -- Fixed bug where ``astype()`` with equal dtype and ``copy=False`` would return a new object (:issue:`284881`) -- Fixed bug when applying a NumPy ufunc with multiple outputs to a :class:`pandas.arrays.IntegerArray` returning None (:issue:`36913`) -- Fixed an inconsistency in :class:`PeriodArray`'s ``__init__`` signature to those of :class:`DatetimeArray` and :class:`TimedeltaArray` (:issue:`37289`) -- Reductions for :class:`BooleanArray`, :class:`Categorical`, :class:`DatetimeArray`, :class:`FloatingArray`, :class:`IntegerArray`, :class:`PeriodArray`, :class:`TimedeltaArray`, and :class:`PandasArray` are now keyword-only methods (:issue:`37541`) +- Fixed bug where :class:`DataFrame` column set to scalar extension type via a dict instantiation was considered an object type rather than the extension type (:issue:`35965`) +- Fixed bug where ``astype()`` with equal dtype and ``copy=False`` would return a new object (:issue:`28488`) +- Fixed bug when 
applying a NumPy ufunc with multiple outputs to an :class:`.IntegerArray` returning None (:issue:`36913`) +- Fixed an inconsistency in :class:`.PeriodArray`'s ``__init__`` signature to those of :class:`.DatetimeArray` and :class:`.TimedeltaArray` (:issue:`37289`) +- Reductions for :class:`.BooleanArray`, :class:`.Categorical`, :class:`.DatetimeArray`, :class:`.FloatingArray`, :class:`.IntegerArray`, :class:`.PeriodArray`, :class:`.TimedeltaArray`, and :class:`.PandasArray` are now keyword-only methods (:issue:`37541`) Other ^^^^^ -- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` incorrectly raising ``AssertionError`` instead of ``ValueError`` when invalid parameter combinations are passed (:issue:`36045`) +- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` incorrectly raising an ``AssertionError`` instead of a ``ValueError`` when invalid parameter combinations are passed (:issue:`36045`) - Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` with numeric values and string ``to_replace`` (:issue:`34789`) - Fixed metadata propagation in :meth:`Series.abs` and ufuncs called on Series and DataFrames (:issue:`28283`) - Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` incorrectly casting from ``PeriodDtype`` to object dtype (:issue:`34871`) - Fixed bug in metadata propagation incorrectly copying DataFrame columns as metadata when the column name overlaps with the metadata name (:issue:`37037`) -- Fixed metadata propagation in the :class:`Series.dt`, :class:`Series.str` accessors, :class:`DataFrame.duplicated`, :class:`DataFrame.stack`, :class:`DataFrame.unstack`, :class:`DataFrame.pivot`, :class:`DataFrame.append`, :class:`DataFrame.diff`, :class:`DataFrame.applymap` and :class:`DataFrame.update` methods (:issue:`28283`) (:issue:`37381`) -- Fixed metadata propagation when selecting columns from a DataFrame with ``DataFrame.__getitem__`` (:issue:`28283`) -- Bug in :meth:`Index.union` behaving differently depending on whether operand is a :class:`Index` or other list-like (:issue:`36384`) -- Passing an array with 2 or more dimensions to the :class:`Series` constructor now raises the more specific ``ValueError``, from a bare ``Exception`` previously (:issue:`35744`) -- Bug in ``accessor.DirNamesMixin``, where ``dir(obj)`` wouldn't show attributes defined on the instance (:issue:`37173`). -- Bug in :meth:`Series.nunique` with ``dropna=True`` was returning incorrect results when both ``NA`` and ``None`` missing values were present (:issue:`37566`) +- Fixed metadata propagation in the :class:`Series.dt`, :class:`Series.str` accessors, :class:`DataFrame.duplicated`, :class:`DataFrame.stack`, :class:`DataFrame.unstack`, :class:`DataFrame.pivot`, :class:`DataFrame.append`, :class:`DataFrame.diff`, :class:`DataFrame.applymap` and :class:`DataFrame.update` methods (:issue:`28283`, :issue:`37381`) +- Fixed metadata propagation when selecting columns with ``DataFrame.__getitem__`` (:issue:`28283`) +- Bug in :meth:`Index.union` behaving differently depending on whether operand is an :class:`Index` or other list-like (:issue:`36384`) +- Passing an array with 2 or more dimensions to the :class:`Series` constructor now raises the more specific ``ValueError`` rather than a bare ``Exception`` (:issue:`35744`) +- Bug in ``dir`` where ``dir(obj)`` wouldn't show attributes defined on the instance for pandas objects (:issue:`37173`) .. ---------------------------------------------------------------------------
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Cleanup of spelling, grammar, and links. ---- cc @jorisvandenbossche, @dsaxton, @phofl: Using ``:meth:`.DataFrameGroupBy.quantile` `` (note the `.`) displays the text `DataFrameGroupBy.quantile` and sphinx finds the link to the proper page. Similar remarks apply to `Rolling` and others. From the [sphinx docs](https://www.sphinx-doc.org/en/master/usage/restructuredtext/domains.html): > Also, if the name is prefixed with a dot, and no exact match is found, the target is taken as a suffix and all object names with that suffix are searched. I don't believe this was known about (at least to me!) and is different from what was aligned on in #37145, so I wanted to get thoughts here on which is preferred. While I've added many cross-links using this, `DataFrameGroupBy.quantile` is the sole example where I have replaced a preexisting link. ---- Other notable changes that are worth mentioning so they can be verified: * NumPy is always spelled "NumPy" in plain text (no backticks) * When a method exists only on Series and DataFrame, the unlinked reference is replaced with a reference to each. For example, ``:meth:`to_csv` `` becomes ``:meth:`DataFrame.to_csv` and :meth:`Series.to_csv` `` * dtypes and argument values are always double-backticked * Moved the following note from Other to Missing > Bug in :meth:`Series.nunique` with ``dropna=True`` was returning incorrect results when both ``NA`` and ``None`` missing values were present (:issue:`37566`)
https://api.github.com/repos/pandas-dev/pandas/pulls/38002
2020-11-22T17:26:09Z
2020-11-25T22:27:33Z
2020-11-25T22:27:32Z
2020-12-06T14:04:00Z
DOC: Deprecate null_counts parameter of DataFrame.info
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 08c528fb484c8..14ba59d19a01f 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -480,6 +480,7 @@ Deprecations - The ``how`` keyword in :meth:`PeriodIndex.astype` is deprecated and will be removed in a future version, use ``index.to_timestamp(how=how)`` instead (:issue:`37982`) - Deprecated :meth:`Index.asi8` for :class:`Index` subclasses other than :class:`DatetimeIndex`, :class:`TimedeltaIndex`, and :class:`PeriodIndex` (:issue:`37877`) - The ``inplace`` parameter of :meth:`Categorical.remove_unused_categories` is deprecated and will be removed in a future version (:issue:`37643`) +- The ``null_counts`` parameter of :meth:`DataFrame.info` is deprecated and replaced by ``show_counts``. It will be removed in a future version (:issue:`37999`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6f6d94f0e9f8e..8b388027137d3 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2531,14 +2531,17 @@ def to_html( is used. By default, the setting in ``pandas.options.display.max_info_columns`` is used.""" ), - null_counts_sub=dedent( + show_counts_sub=dedent( """\ - null_counts : bool, optional + show_counts : bool, optional Whether to show the non-null counts. By default, this is shown only if the DataFrame is smaller than ``pandas.options.display.max_info_rows`` and ``pandas.options.display.max_info_columns``. A value of True always - shows the counts, and False never shows the counts.""" + shows the counts, and False never shows the counts. + null_counts : bool, optional + .. deprecated:: 1.2.0 + Use show_counts instead.""" ), examples_sub=dedent( """\ @@ -2639,8 +2642,18 @@ def info( buf: Optional[IO[str]] = None, max_cols: Optional[int] = None, memory_usage: Optional[Union[bool, str]] = None, + show_counts: Optional[bool] = None, null_counts: Optional[bool] = None, ) -> None: + if null_counts is not None: + if show_counts is not None: + raise ValueError("null_counts used with show_counts. Use show_counts.") + warnings.warn( + "null_counts is deprecated. Use show_counts instead", + FutureWarning, + stacklevel=2, + ) + show_counts = null_counts info = DataFrameInfo( data=self, memory_usage=memory_usage, @@ -2649,7 +2662,7 @@ def info( buf=buf, max_cols=max_cols, verbose=verbose, - show_counts=null_counts, + show_counts=show_counts, ) def memory_usage(self, index=True, deep=False) -> Series: diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py index 563dbaa06e526..98bd159c567b1 100644 --- a/pandas/io/formats/info.py +++ b/pandas/io/formats/info.py @@ -203,7 +203,7 @@ def render( consume the same memory amount for corresponding dtypes. With deep memory introspection, a real memory usage calculation is performed at the cost of computational resources. 
- %(null_counts_sub)s + %(show_counts_sub)s Returns ------- diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 53d38297eafba..4f2cd6d0f80fe 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -177,9 +177,9 @@ def test_show_null_counts(self): df = DataFrame(1, columns=range(10), index=range(10)) df.iloc[1, 1] = np.nan - def check(null_counts, result): + def check(show_counts, result): buf = StringIO() - df.info(buf=buf, null_counts=null_counts) + df.info(buf=buf, show_counts=show_counts) assert ("non-null" in buf.getvalue()) is result with option_context( @@ -194,6 +194,18 @@ def check(null_counts, result): check(True, False) check(False, False) + # GH37999 + with tm.assert_produces_warning( + FutureWarning, match="null_counts is deprecated.+" + ): + buf = StringIO() + df.info(buf=buf, null_counts=True) + assert "non-null" in buf.getvalue() + + # GH37999 + with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"): + df.info(null_counts=True, show_counts=True) + def test_repr_truncation(self): max_len = 20 with option_context("display.max_colwidth", max_len): diff --git a/pandas/tests/io/formats/test_info.py b/pandas/tests/io/formats/test_info.py index 8c2155aec7248..7befe85850014 100644 --- a/pandas/tests/io/formats/test_info.py +++ b/pandas/tests/io/formats/test_info.py @@ -161,7 +161,7 @@ def test_info_verbose_with_counts_spacing( """Test header column, spacer, first line and last line in verbose mode.""" frame = DataFrame(np.random.randn(3, size)) buf = StringIO() - frame.info(verbose=True, null_counts=True, buf=buf) + frame.info(verbose=True, show_counts=True, buf=buf) all_lines = buf.getvalue().splitlines() # Here table would contain only header, separator and table lines # dframe repr, index summary, memory usage and dtypes are excluded @@ -480,7 +480,7 @@ def test_info_int_columns(): # GH#37245 df = DataFrame({1: [1, 2], 2: [2, 3]}, index=["A", "B"]) buf = StringIO() - df.info(null_counts=True, buf=buf) + df.info(show_counts=True, buf=buf) result = buf.getvalue() expected = textwrap.dedent( """\
- [x] closes #36805 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` ### Background. > Might be worth it actually - I _think_ people are only likely to use `.info` in an interactive session, rather than as part of production code, so perhaps it is worth going through the deprecation process to make this argument clearer * xref https://github.com/pandas-dev/pandas/issues/36805#issuecomment-702754290 * Deprecate `null_counts` parameter of DataFrame.info in favor of `show_counts` and add warning. * Add whatsnew and add tests to test for FutureWarning. Move usages of null_counts in tests to show_counts. Doc preview: <img width="755" alt="Screenshot 2020-11-22 at 6 19 05 PM" src="https://user-images.githubusercontent.com/19281800/99901143-63512400-2cef-11eb-846a-447050de6d8f.png">
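For reference, a minimal sketch of the new keyword against this branch (assumes pandas 1.2+, where the deprecation lands; the frame here is illustrative):

```python
from io import StringIO
import warnings

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1.0, np.nan, 3.0]})

# New spelling: force the non-null counts to be shown.
buf = StringIO()
df.info(buf=buf, show_counts=True)
assert "non-null" in buf.getvalue()

# Old spelling still works, but now emits a FutureWarning.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    df.info(buf=StringIO(), null_counts=True)
assert any(issubclass(w.category, FutureWarning) for w in caught)

# Passing both is ambiguous and raises.
try:
    df.info(null_counts=True, show_counts=True)
except ValueError as err:
    print(err)  # null_counts used with show_counts. Use show_counts.
```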
https://api.github.com/repos/pandas-dev/pandas/pulls/37999
2020-11-22T10:15:24Z
2020-11-25T20:59:23Z
2020-11-25T20:59:23Z
2022-10-29T01:46:03Z
DOC: add a link to new styler method
diff --git a/doc/source/reference/style.rst b/doc/source/reference/style.rst index 24a47336b0522..e80dc1b57ff80 100644 --- a/doc/source/reference/style.rst +++ b/doc/source/reference/style.rst @@ -36,6 +36,7 @@ Style application Styler.where Styler.format Styler.set_precision + Styler.set_td_classes Styler.set_table_styles Styler.set_table_attributes Styler.set_caption diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 4b7a5e76cb475..298a7836bcb58 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -565,7 +565,6 @@ def set_td_classes(self, classes: DataFrame) -> "Styler": ' <tr><td class="data row0 col0 other-class" >1</td></tr>' ' </tbody>' '</table>' - """ classes = classes.reindex_like(self.data)
Adds a documentation link for the new `Styler` method from #36159
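For anyone landing here from the docs, a rough usage sketch of the linked method (not taken from this PR; assumes pandas 1.2, where `Styler.set_td_classes` was introduced in #36159, and the frame/class names are made up):

```python
import pandas as pd

df = pd.DataFrame([[1, 2]], columns=["a", "b"])
# One CSS class string per cell; the classes frame is reindexed like
# ``df``, so cells left empty simply get no extra class.
classes = pd.DataFrame([["other-class", ""]], columns=["a", "b"])
html = df.style.set_td_classes(classes).render()
assert "other-class" in html
```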
https://api.github.com/repos/pandas-dev/pandas/pulls/37998
2020-11-22T08:44:20Z
2020-11-23T13:26:13Z
2020-11-23T13:26:13Z
2021-01-26T08:08:42Z
BUG: crosstab with duplicate column or index labels
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 049ccc0e6c4df..b16888338cda5 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -729,6 +729,7 @@ Groupby/resample/rolling Reshaping ^^^^^^^^^ +- Bug in :meth:`DataFrame.crosstab` was returning incorrect results on inputs with duplicate row names, duplicate column names or duplicate names between row and column labels (:issue:`22529`) - Bug in :meth:`DataFrame.pivot_table` with ``aggfunc='count'`` or ``aggfunc='sum'`` returning ``NaN`` for missing categories when pivoted on a ``Categorical``. Now returning ``0`` (:issue:`31422`) - Bug in :func:`concat` and :class:`DataFrame` constructor where input index names are not preserved in some cases (:issue:`13475`) - Bug in func :meth:`crosstab` when using multiple columns with ``margins=True`` and ``normalize=True`` (:issue:`35144`) diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index c1198cdfcda81..22887cede51ed 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -5,6 +5,7 @@ List, Optional, Sequence, + Set, Tuple, Union, cast, @@ -578,29 +579,37 @@ def crosstab( b 0 1 0 c 0 0 0 """ + if values is None and aggfunc is not None: + raise ValueError("aggfunc cannot be used without values.") + + if values is not None and aggfunc is None: + raise ValueError("values cannot be used without an aggfunc.") + index = com.maybe_make_list(index) columns = com.maybe_make_list(columns) - rownames = _get_names(index, rownames, prefix="row") - colnames = _get_names(columns, colnames, prefix="col") - common_idx = None pass_objs = [x for x in index + columns if isinstance(x, (ABCSeries, ABCDataFrame))] if pass_objs: common_idx = get_objs_combined_axis(pass_objs, intersect=True, sort=False) - data: Dict = {} - data.update(zip(rownames, index)) - data.update(zip(colnames, columns)) - - if values is None and aggfunc is not None: - raise ValueError("aggfunc cannot be used without values.") + rownames = _get_names(index, rownames, prefix="row") + colnames = _get_names(columns, colnames, prefix="col") - if values is not None and aggfunc is None: - raise ValueError("values cannot be used without an aggfunc.") + # duplicate names mapped to unique names for pivot op + ( + rownames_mapper, + unique_rownames, + colnames_mapper, + unique_colnames, + ) = _build_names_mapper(rownames, colnames) from pandas import DataFrame + data = { + **dict(zip(unique_rownames, index)), + **dict(zip(unique_colnames, columns)), + } df = DataFrame(data, index=common_idx) original_df_cols = df.columns @@ -613,8 +622,8 @@ def crosstab( table = df.pivot_table( ["__dummy__"], - index=rownames, - columns=colnames, + index=unique_rownames, + columns=unique_colnames, margins=margins, margins_name=margins_name, dropna=dropna, @@ -633,6 +642,9 @@ def crosstab( table, normalize=normalize, margins=margins, margins_name=margins_name ) + table = table.rename_axis(index=rownames_mapper, axis=0) + table = table.rename_axis(columns=colnames_mapper, axis=1) + return table @@ -731,3 +743,57 @@ def _get_names(arrs, names, prefix: str = "row"): names = list(names) return names + + +def _build_names_mapper( + rownames: List[str], colnames: List[str] +) -> Tuple[Dict[str, str], List[str], Dict[str, str], List[str]]: + """ + Given the names of a DataFrame's rows and columns, returns a set of unique row + and column names and mappers that convert to original names. 
+
+    A row or column name is replaced if it is a duplicate among the rows of the inputs,
+    among the columns of the inputs or between the rows and the columns.
+
+    Parameters
+    ----------
+    rownames: list[str]
+    colnames: list[str]
+
+    Returns
+    -------
+    Tuple[Dict[str, str], List[str], Dict[str, str], List[str]]
+
+    rownames_mapper: dict[str, str]
+        a dictionary with new row names as keys and original rownames as values
+    unique_rownames: list[str]
+        a list of rownames with duplicate names replaced by dummy names
+    colnames_mapper: dict[str, str]
+        a dictionary with new column names as keys and original column names as values
+    unique_colnames: list[str]
+        a list of column names with duplicate names replaced by dummy names
+
+    """
+
+    def get_duplicates(names):
+        seen: Set = set()
+        dups: Set = set()
+        for name in names:
+            if name in seen:
+                dups.add(name)
+            seen.add(name)
+        return dups
+
+    shared_names = set(rownames).intersection(set(colnames))
+    dup_names = get_duplicates(rownames) | get_duplicates(colnames) | shared_names
+
+    rownames_mapper = {
+        f"row_{i}": name for i, name in enumerate(rownames) if name in dup_names
+    }
+    unique_rownames = [
+        f"row_{i}" if name in dup_names else name for i, name in enumerate(rownames)
+    ]
+
+    colnames_mapper = {
+        f"col_{i}": name for i, name in enumerate(colnames) if name in dup_names
+    }
+    unique_colnames = [
+        f"col_{i}" if name in dup_names else name for i, name in enumerate(colnames)
+    ]
+
+    return rownames_mapper, unique_rownames, colnames_mapper, unique_colnames
diff --git a/pandas/tests/reshape/test_crosstab.py b/pandas/tests/reshape/test_crosstab.py
index 5f6037276b31c..6faf64789c687 100644
--- a/pandas/tests/reshape/test_crosstab.py
+++ b/pandas/tests/reshape/test_crosstab.py
@@ -535,15 +535,32 @@ def test_crosstab_with_numpy_size(self):
         )
         tm.assert_frame_equal(result, expected)
 
-    def test_crosstab_dup_index_names(self):
-        # GH 13279
-        s = Series(range(3), name="foo")
+    def test_crosstab_duplicate_names(self):
+        # GH 13279 / 22529
+
+        s1 = Series(range(3), name="foo")
+        s2_foo = Series(range(1, 4), name="foo")
+        s2_bar = Series(range(1, 4), name="bar")
+        s3 = Series(range(3), name="waldo")
+
+        # check result computed with duplicate labels against
+        # result computed with unique labels, then relabelled
+        mapper = {"bar": "foo"}
+
+        # duplicate row, column labels
+        result = crosstab(s1, s2_foo)
+        expected = crosstab(s1, s2_bar).rename_axis(columns=mapper, axis=1)
+        tm.assert_frame_equal(result, expected)
+
+        # duplicate row, unique column labels
+        result = crosstab([s1, s2_foo], s3)
+        expected = crosstab([s1, s2_bar], s3).rename_axis(index=mapper, axis=0)
+        tm.assert_frame_equal(result, expected)
+
+        # unique row, duplicate column labels
+        result = crosstab(s3, [s1, s2_foo])
+        expected = crosstab(s3, [s1, s2_bar]).rename_axis(columns=mapper, axis=1)
 
-        result = crosstab(s, s)
-        expected_index = Index(range(3), name="foo")
-        expected = DataFrame(
-            np.eye(3, dtype=np.int64), index=expected_index, columns=expected_index
-        )
         tm.assert_frame_equal(result, expected)
 
     @pytest.mark.parametrize("names", [["a", ("b", "c")], [("a", "b"), "c"]])
- [x] closes #22529 - [x] tests added / passed - [x] passes black pandas - [x] passes git diff upstream/master -u -- "*.py" | flake8 --diff - [x] whatsnew entry Picking up from #28474 cc @jreback in case this can go in in time for 1.2
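A quick repro of the fixed behavior, adapted from the new test (variable names are illustrative):

```python
import pandas as pd

s1 = pd.Series(range(3), name="foo")
s2 = pd.Series(range(1, 4), name="foo")  # deliberately the same name

# The shared "foo" label used to collide inside the internal pivot;
# labels are now disambiguated during the pivot and restored afterwards.
result = pd.crosstab(s1, s2)
print(result.index.name, result.columns.name)  # foo foo
```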
https://api.github.com/repos/pandas-dev/pandas/pulls/37997
2020-11-22T05:12:56Z
2020-11-28T17:29:04Z
2020-11-28T17:29:04Z
2020-11-28T17:29:11Z
Removed period to make it more consistent
diff --git a/README.md b/README.md index a2f2f1c04442a..4072faffe3b3a 100644 --- a/README.md +++ b/README.md @@ -60,7 +60,7 @@ Here are just a few of the things that pandas does well: and saving/loading data from the ultrafast [**HDF5 format**][hdfstore] - [**Time series**][timeseries]-specific functionality: date range generation and frequency conversion, moving window statistics, - date shifting and lagging. + date shifting and lagging [missing-data]: https://pandas.pydata.org/pandas-docs/stable/missing_data.html#working-with-missing-data
Removed the trailing period, since the lines above don't end with one, to keep the list consistent
https://api.github.com/repos/pandas-dev/pandas/pulls/37995
2020-11-22T04:38:01Z
2020-11-24T03:15:06Z
2020-11-24T03:15:06Z
2020-11-24T03:15:10Z
CLN: Use new hashtables in libindex to avoid casting
diff --git a/pandas/_libs/index_class_helper.pxi.in b/pandas/_libs/index_class_helper.pxi.in index c7b67667bda17..69680e472bbc2 100644 --- a/pandas/_libs/index_class_helper.pxi.in +++ b/pandas/_libs/index_class_helper.pxi.in @@ -10,21 +10,21 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in {{py: -# name, dtype, hashtable_name -dtypes = [('Float64', 'float64', 'Float64'), - ('Float32', 'float32', 'Float64'), - ('Int64', 'int64', 'Int64'), - ('Int32', 'int32', 'Int64'), - ('Int16', 'int16', 'Int64'), - ('Int8', 'int8', 'Int64'), - ('UInt64', 'uint64', 'UInt64'), - ('UInt32', 'uint32', 'UInt64'), - ('UInt16', 'uint16', 'UInt64'), - ('UInt8', 'uint8', 'UInt64'), +# name, dtype +dtypes = [('Float64', 'float64'), + ('Float32', 'float32'), + ('Int64', 'int64'), + ('Int32', 'int32'), + ('Int16', 'int16'), + ('Int8', 'int8'), + ('UInt64', 'uint64'), + ('UInt32', 'uint32'), + ('UInt16', 'uint16'), + ('UInt8', 'uint8'), ] }} -{{for name, dtype, hashtable_name in dtypes}} +{{for name, dtype in dtypes}} cdef class {{name}}Engine(IndexEngine): @@ -32,7 +32,7 @@ cdef class {{name}}Engine(IndexEngine): # returns an ndarray with dtype {{dtype}}_t cdef _make_hash_table(self, Py_ssize_t n): - return _hash.{{hashtable_name}}HashTable(n) + return _hash.{{name}}HashTable(n) {{if name not in {'Float64', 'Float32'} }} cdef _check_type(self, object val): @@ -41,9 +41,7 @@ cdef class {{name}}Engine(IndexEngine): {{endif}} cdef void _call_map_locations(self, values): - # self.mapping is of type {{hashtable_name}}HashTable, - # so convert dtype of values - self.mapping.map_locations(algos.ensure_{{hashtable_name.lower()}}(values)) + self.mapping.map_locations(algos.ensure_{{name.lower()}}(values)) cdef _maybe_get_bool_indexer(self, object val): cdef:
Small, hard-to-measure perf improvement, since it only makes a difference on the first lookup: ``` In [2]: rng = pd.Index(range(50)).repeat(20).astype("category") In [14]: %timeit rng._engine.clear_mapping(); rng.get_loc(40) 24.8 µs ± 624 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each) # <-- master 21.3 µs ± 296 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each) # <-- PR ```
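For context, a small sketch of the dtype-sized tables this switches to (internal `pandas._libs` API, so names and signatures may shift between versions):

```python
import numpy as np
from pandas._libs import hashtable

values = np.array([10, 20, 30], dtype=np.int32)
table = hashtable.Int32HashTable(len(values))
table.map_locations(values)  # no ensure_int64 cast needed any more
print(table.lookup(np.array([20], dtype=np.int32)))  # [1]
```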
https://api.github.com/repos/pandas-dev/pandas/pulls/37994
2020-11-22T02:37:53Z
2020-11-24T03:14:38Z
2020-11-24T03:14:38Z
2020-11-24T15:20:33Z
[WIP] DOC: MultiIndex EX01 errors
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 5eb890c9817c0..da57fd8f0da78 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1575,6 +1575,33 @@ def droplevel(self, level=0): Returns ------- Index or MultiIndex + + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays( + ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z']) + >>> mi + MultiIndex([(1, 3, 5), + (2, 4, 6)], + names=['x', 'y', 'z']) + + >>> mi.droplevel() + MultiIndex([(3, 5), + (4, 6)], + names=['y', 'z']) + + >>> mi.droplevel(2) + MultiIndex([(1, 3), + (2, 4)], + names=['x', 'y']) + + >>> mi.droplevel('z') + MultiIndex([(1, 3), + (2, 4)], + names=['x', 'y']) + + >>> mi.droplevel(['x', 'y']) + Int64Index([5, 6], dtype='int64', name='z') """ if not isinstance(level, (tuple, list)): level = [level] diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 9eb34d920a328..d5f82a9791cf3 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -893,6 +893,15 @@ def set_levels(self, levels, level=None, inplace=None, verify_integrity=True): def nlevels(self) -> int: """ Integer number of levels in this MultiIndex. + + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']]) + >>> mi + MultiIndex([('a', 'b', 'c')], + ) + >>> mi.nlevels + 3 """ return len(self._levels) @@ -900,6 +909,15 @@ def nlevels(self) -> int: def levshape(self): """ A tuple with the length of each level. + + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']]) + >>> mi + MultiIndex([('a', 'b', 'c')], + ) + >>> mi.levshape + (1, 1, 1) """ return tuple(len(x) for x in self.levels) @@ -1457,7 +1475,22 @@ def _set_names(self, names, level=None, validate=True): self._reset_cache() names = property( - fset=_set_names, fget=_get_names, doc="""\nNames of levels in MultiIndex.\n""" + fset=_set_names, + fget=_get_names, + doc=""" + Names of levels in MultiIndex. + + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays( + ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z']) + >>> mi + MultiIndex([(1, 3, 5), + (2, 4, 6)], + names=['x', 'y', 'z']) + >>> mi.names + FrozenList(['x', 'y', 'z']) + """, ) # -------------------------------------------------------------------- @@ -1701,6 +1734,32 @@ def to_frame(self, index=True, name=None): -------- DataFrame : Two-dimensional, size-mutable, potentially heterogeneous tabular data. + + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays([['a', 'b'], ['c', 'd']]) + >>> mi + MultiIndex([('a', 'c'), + ('b', 'd')], + ) + + >>> df = mi.to_frame() + >>> df + 0 1 + a c a c + b d b d + + >>> df = mi.to_frame(index=False) + >>> df + 0 1 + 0 a c + 1 b d + + >>> df = mi.to_frame(name=['x', 'y']) + >>> df + x y + a c a c + b d b d """ from pandas import DataFrame @@ -2238,6 +2297,24 @@ def reorder_levels(self, order): Returns ------- MultiIndex + + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays([[1, 2], [3, 4]], names=['x', 'y']) + >>> mi + MultiIndex([(1, 3), + (2, 4)], + names=['x', 'y']) + + >>> mi.reorder_levels(order=[1, 0]) + MultiIndex([(3, 1), + (4, 2)], + names=['y', 'x']) + + >>> mi.reorder_levels(order=['y', 'x']) + MultiIndex([(3, 1), + (4, 2)], + names=['y', 'x']) """ order = [self._get_level_number(i) for i in order] if len(order) != self.nlevels: @@ -2296,6 +2373,34 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True): Resulting index. indexer : np.ndarray Indices of output values in original index. 
+ + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays([[0, 0], [2, 1]]) + >>> mi + MultiIndex([(0, 2), + (0, 1)], + ) + + >>> mi.sortlevel() + (MultiIndex([(0, 1), + (0, 2)], + ), array([1, 0])) + + >>> mi.sortlevel(sort_remaining=False) + (MultiIndex([(0, 2), + (0, 1)], + ), array([0, 1])) + + >>> mi.sortlevel(1) + (MultiIndex([(0, 1), + (0, 2)], + ), array([1, 0])) + + >>> mi.sortlevel(1, ascending=False) + (MultiIndex([(0, 2), + (0, 1)], + ), array([0, 1])) """ if isinstance(level, (str, int)): level = [level]
- [x] xref #37875, #27977 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/37993
2020-11-22T01:28:44Z
2020-11-23T13:37:29Z
2020-11-23T13:37:29Z
2020-11-23T17:09:52Z
Bug in DataFrame.loc returning elements in wrong order when the indexer is ordered differently from the object
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index ad5af5df710ba..fbd209d73d933 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -621,6 +621,7 @@ Indexing - Bug in :meth:`DataFrame.iloc` and :meth:`Series.iloc` aligning objects in ``__setitem__`` (:issue:`22046`) - Bug in :meth:`DataFrame.loc` did not raise ``KeyError`` when missing combination was given with ``slice(None)`` for remaining levels (:issue:`19556`) - Bug in :meth:`DataFrame.loc` raising ``TypeError`` when non-integer slice was given to select values from :class:`MultiIndex` (:issue:`25165`, :issue:`24263`) +- Bug in :meth:`DataFrame.loc` returning and assigning elements in wrong order when indexer is differently ordered than the :class:`MultiIndex` to filter (:issue:`31330`, :issue:`34603`) - Bug in :meth:`DataFrame.loc` and :meth:`DataFrame.__getitem__` raising ``KeyError`` when columns were :class:`MultiIndex` with only one level (:issue:`29749`) Missing diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index be0b0c5208b1c..4b67deb2d102c 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -3262,7 +3262,7 @@ def _update_indexer( self._get_level_indexer(x, level=i, indexer=indexer) ) indexers = (idxrs if indexers is None else indexers).union( - idxrs + idxrs, sort=False ) except KeyError: @@ -3349,6 +3349,9 @@ def _reorder_indexer( # order they appears in a list-like sequence # This mapping is then use to reorder the indexer for i, k in enumerate(seq): + if is_scalar(k): + # GH#34603 we want to treat a scalar the same as an all equal list + k = [k] if com.is_bool_indexer(k): new_order = np.arange(n)[indexer] elif is_list_like(k): @@ -3362,6 +3365,9 @@ def _reorder_indexer( key_order_map[level_indexer] = np.arange(len(level_indexer)) new_order = key_order_map[self.codes[i][indexer]] + elif isinstance(k, slice) and k.start is None and k.stop is None: + # slice(None) should not determine order GH#31330 + new_order = np.ones((n,))[indexer] else: # For all other case, use the same order as the level new_order = np.arange(n)[indexer] diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py index cd6176722245b..42525fc575397 100644 --- a/pandas/tests/indexing/multiindex/test_loc.py +++ b/pandas/tests/indexing/multiindex/test_loc.py @@ -668,3 +668,30 @@ def test_get_loc_datetime_index(): # Check if get_loc matches for Index and MultiIndex assert mi.get_loc("2001-01") == slice(0, 31, None) assert index.get_loc("2001-01") == slice(0, 31, None) + + +def test_loc_setitem_indexer_differently_ordered(): + # GH#34603 + mi = MultiIndex.from_product([["a", "b"], [0, 1]]) + df = DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=mi) + + indexer = ("a", [1, 0]) + df.loc[indexer, :] = np.array([[9, 10], [11, 12]]) + expected = DataFrame([[11, 12], [9, 10], [5, 6], [7, 8]], index=mi) + tm.assert_frame_equal(df, expected) + + +def test_loc_getitem_index_differently_ordered_slice_none(): + # GH#31330 + df = DataFrame( + [[1, 2], [3, 4], [5, 6], [7, 8]], + index=[["a", "a", "b", "b"], [1, 2, 1, 2]], + columns=["a", "b"], + ) + result = df.loc[(slice(None), [2, 1]), :] + expected = DataFrame( + [[3, 4], [7, 8], [1, 2], [5, 6]], + index=[["a", "b", "a", "b"], [2, 2, 1, 1]], + columns=["a", "b"], + ) + tm.assert_frame_equal(result, expected)
- [x] closes #34603 - [x] closes #31330 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
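A short repro of the getitem case, lifted from the new test:

```python
import pandas as pd

df = pd.DataFrame(
    [[1, 2], [3, 4], [5, 6], [7, 8]],
    index=[["a", "a", "b", "b"], [1, 2, 1, 2]],
    columns=["a", "b"],
)

# The order [2, 1] given in the indexer is now respected instead of
# silently falling back to the index's own ordering.
print(df.loc[(slice(None), [2, 1]), :])
```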
https://api.github.com/repos/pandas-dev/pandas/pulls/37992
2020-11-22T01:01:47Z
2020-11-25T20:51:29Z
2020-11-25T20:51:28Z
2020-11-25T20:52:30Z
CLN: always pass ndim to make_block
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 1f348ca0b0ece..f6ff38201fdfa 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -124,7 +124,16 @@ def _simple_new( obj._mgr_locs = placement return obj - def __init__(self, values, placement, ndim=None): + def __init__(self, values, placement, ndim: int): + """ + Parameters + ---------- + values : np.ndarray or ExtensionArray + placement : BlockPlacement (or castable) + ndim : int + 1 for SingleBlockManager/Series, 2 for BlockManager/DataFrame + """ + # TODO(EA2D): ndim will be unnecessary with 2D EAs self.ndim = self._check_ndim(values, ndim) self.mgr_locs = placement self.values = self._maybe_coerce_values(values) @@ -1646,7 +1655,7 @@ class ExtensionBlock(Block): values: ExtensionArray - def __init__(self, values, placement, ndim=None): + def __init__(self, values, placement, ndim: int): """ Initialize a non-consolidatable block. @@ -2172,7 +2181,9 @@ def diff(self, n: int, axis: int = 0) -> List["Block"]: values = self.array_values().reshape(self.shape) new_values = values - values.shift(n, axis=axis) - return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)] + return [ + TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer, ndim=self.ndim) + ] def shift(self, periods, axis=0, fill_value=None): # TODO(EA2D) this is unnecessary if these blocks are backed by 2D EAs @@ -2623,6 +2634,7 @@ def get_block_type(values, dtype=None): elif is_interval_dtype(dtype) or is_period_dtype(dtype): cls = ObjectValuesExtensionBlock elif is_extension_array_dtype(values.dtype): + # Note: need to be sure PandasArray is unwrapped before we get here cls = ExtensionBlock elif issubclass(vtype, np.floating): cls = FloatBlock diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 205af5354d333..06de1972b4c9a 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -82,6 +82,7 @@ def concatenate_block_managers( b = make_block( _concatenate_join_units(join_units, concat_axis, copy=copy), placement=placement, + ndim=len(axes), ) blocks.append(b) diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index bcafa2c2fdca7..909efe2233b53 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -225,7 +225,8 @@ def init_ndarray(values, index, columns, dtype: Optional[DtypeObj], copy: bool): # TODO: What about re-joining object columns? 
block_values = [ - make_block(dvals_list[n], placement=[n]) for n in range(len(dvals_list)) + make_block(dvals_list[n], placement=[n], ndim=2) + for n in range(len(dvals_list)) ] else: diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 155d88d6ec2d9..760765e3a20e6 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -33,7 +33,7 @@ ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ExtensionDtype -from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries +from pandas.core.dtypes.generic import ABCDataFrame, ABCPandasArray, ABCSeries from pandas.core.dtypes.missing import array_equals, isna import pandas.core.algorithms as algos @@ -1432,7 +1432,7 @@ def _make_na_block(self, placement, fill_value=None): dtype, fill_value = infer_dtype_from_scalar(fill_value) block_values = np.empty(block_shape, dtype=dtype) block_values.fill(fill_value) - return make_block(block_values, placement=placement) + return make_block(block_values, placement=placement, ndim=block_values.ndim) def take(self, indexer, axis: int = 1, verify: bool = True, convert: bool = True): """ @@ -1655,7 +1655,9 @@ def create_block_manager_from_blocks(blocks, axes: List[Index]) -> BlockManager: # is basically "all items", but if there're many, don't bother # converting, it's an error anyway. blocks = [ - make_block(values=blocks[0], placement=slice(0, len(axes[0]))) + make_block( + values=blocks[0], placement=slice(0, len(axes[0])), ndim=2 + ) ] mgr = BlockManager(blocks, axes) @@ -1675,8 +1677,11 @@ def create_block_manager_from_arrays( assert isinstance(axes, list) assert all(isinstance(x, Index) for x in axes) + # ensure we dont have any PandasArrays when we call get_block_type + # Note: just calling extract_array breaks tests that patch PandasArray._typ. + arrays = [x if not isinstance(x, ABCPandasArray) else x.to_numpy() for x in arrays] try: - blocks = form_blocks(arrays, names, axes) + blocks = _form_blocks(arrays, names, axes) mgr = BlockManager(blocks, axes) mgr._consolidate_inplace() return mgr @@ -1708,7 +1713,7 @@ def construction_error(tot_items, block_shape, axes, e=None): # ----------------------------------------------------------------------- -def form_blocks(arrays, names: Index, axes) -> List[Block]: +def _form_blocks(arrays, names: Index, axes) -> List[Block]: # put "leftover" items in float bucket, where else? # generalize? 
items_dict: DefaultDict[str, List] = defaultdict(list) @@ -1755,7 +1760,7 @@ def form_blocks(arrays, names: Index, axes) -> List[Block]: if len(items_dict["DatetimeTZBlock"]): dttz_blocks = [ - make_block(array, klass=DatetimeTZBlock, placement=i) + make_block(array, klass=DatetimeTZBlock, placement=i, ndim=2) for i, _, array in items_dict["DatetimeTZBlock"] ] blocks.extend(dttz_blocks) @@ -1770,15 +1775,14 @@ def form_blocks(arrays, names: Index, axes) -> List[Block]: if len(items_dict["CategoricalBlock"]) > 0: cat_blocks = [ - make_block(array, klass=CategoricalBlock, placement=i) + make_block(array, klass=CategoricalBlock, placement=i, ndim=2) for i, _, array in items_dict["CategoricalBlock"] ] blocks.extend(cat_blocks) if len(items_dict["ExtensionBlock"]): - external_blocks = [ - make_block(array, klass=ExtensionBlock, placement=i) + make_block(array, klass=ExtensionBlock, placement=i, ndim=2) for i, _, array in items_dict["ExtensionBlock"] ] @@ -1786,7 +1790,7 @@ def form_blocks(arrays, names: Index, axes) -> List[Block]: if len(items_dict["ObjectValuesExtensionBlock"]): external_blocks = [ - make_block(array, klass=ObjectValuesExtensionBlock, placement=i) + make_block(array, klass=ObjectValuesExtensionBlock, placement=i, ndim=2) for i, _, array in items_dict["ObjectValuesExtensionBlock"] ] @@ -1799,7 +1803,7 @@ def form_blocks(arrays, names: Index, axes) -> List[Block]: block_values = np.empty(shape, dtype=object) block_values.fill(np.nan) - na_block = make_block(block_values, placement=extra_locs) + na_block = make_block(block_values, placement=extra_locs, ndim=2) blocks.append(na_block) return blocks @@ -1816,7 +1820,7 @@ def _simple_blockify(tuples, dtype) -> List[Block]: if dtype is not None and values.dtype != dtype: # pragma: no cover values = values.astype(dtype) - block = make_block(values, placement=placement) + block = make_block(values, placement=placement, ndim=2) return [block] @@ -1830,7 +1834,7 @@ def _multi_blockify(tuples, dtype=None): values, placement = _stack_arrays(list(tup_block), dtype) - block = make_block(values, placement=placement) + block = make_block(values, placement=placement, ndim=2) new_blocks.append(block) return new_blocks @@ -1921,7 +1925,7 @@ def _merge_blocks( new_values = new_values[argsort] new_mgr_locs = new_mgr_locs[argsort] - return [make_block(new_values, placement=new_mgr_locs)] + return [make_block(new_values, placement=new_mgr_locs, ndim=2)] # can't consolidate --> no merge return blocks diff --git a/pandas/tests/extension/test_external_block.py b/pandas/tests/extension/test_external_block.py index e98545daaf049..693d0645c9519 100644 --- a/pandas/tests/extension/test_external_block.py +++ b/pandas/tests/extension/test_external_block.py @@ -17,7 +17,7 @@ def df(): df1 = pd.DataFrame({"a": [1, 2, 3]}) blocks = df1._mgr.blocks values = np.arange(3, dtype="int64") - custom_block = CustomBlock(values, placement=slice(1, 2)) + custom_block = CustomBlock(values, placement=slice(1, 2), ndim=2) blocks = blocks + (custom_block,) block_manager = BlockManager(blocks, [pd.Index(["a", "b"]), df1.index]) return pd.DataFrame(block_manager) diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index d069b5aa08e22..d7580e9f8610e 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -267,7 +267,7 @@ def test_delete(self): def test_split(self): # GH#37799 values = np.random.randn(3, 4) - blk = make_block(values, placement=[3, 1, 6]) + blk = make_block(values, 
placement=[3, 1, 6], ndim=2) result = blk._split() # check that we get views, not copies @@ -276,9 +276,9 @@ def test_split(self): assert len(result) == 3 expected = [ - make_block(values[[0]], placement=[3]), - make_block(values[[1]], placement=[1]), - make_block(values[[2]], placement=[6]), + make_block(values[[0]], placement=[3], ndim=2), + make_block(values[[1]], placement=[1], ndim=2), + make_block(values[[2]], placement=[6], ndim=2), ] for res, exp in zip(result, expected): assert_block_equal(res, exp) @@ -342,7 +342,9 @@ def test_categorical_block_pickle(self): def test_iget(self): cols = Index(list("abc")) values = np.random.rand(3, 3) - block = make_block(values=values.copy(), placement=np.arange(3)) + block = make_block( + values=values.copy(), placement=np.arange(3), ndim=values.ndim + ) mgr = BlockManager(blocks=[block], axes=[cols, np.arange(3)]) tm.assert_almost_equal(mgr.iget(0).internal_values(), values[0]) @@ -1150,17 +1152,17 @@ def test_make_block_no_pandas_array(): arr = pd.arrays.PandasArray(np.array([1, 2])) # PandasArray, no dtype - result = make_block(arr, slice(len(arr))) + result = make_block(arr, slice(len(arr)), ndim=arr.ndim) assert result.is_integer is True assert result.is_extension is False # PandasArray, PandasDtype - result = make_block(arr, slice(len(arr)), dtype=arr.dtype) + result = make_block(arr, slice(len(arr)), dtype=arr.dtype, ndim=arr.ndim) assert result.is_integer is True assert result.is_extension is False # ndarray, PandasDtype - result = make_block(arr.to_numpy(), slice(len(arr)), dtype=arr.dtype) + result = make_block(arr.to_numpy(), slice(len(arr)), dtype=arr.dtype, ndim=arr.ndim) assert result.is_integer is True assert result.is_extension is False
Found along the way that we can incorrectly put an ndarray[object] into an ExtensionBlock if we're not careful. This fixes some of those cases; a follow-up branch will fix the rest (with tests).
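For illustration, a minimal sketch of the changed contract (internal API; the `values` and `placement` here are made up):

```python
import numpy as np
from pandas.core.internals import make_block

values = np.arange(6).reshape(2, 3)
# ndim is now always passed explicitly: 2 for DataFrame-backed blocks,
# 1 for Series-backed ones.
blk = make_block(values, placement=[0, 1], ndim=2)
print(blk.shape)  # (2, 3)
```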
https://api.github.com/repos/pandas-dev/pandas/pulls/37991
2020-11-22T00:05:41Z
2020-11-24T03:14:04Z
2020-11-24T03:14:04Z
2020-11-24T04:31:52Z
CLN: avoid try/except in Index methods
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 7658230d9e1dd..a296310d92ff1 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2490,12 +2490,10 @@ def _get_unique_index(self, dropna: bool = False): else: values = self._values - if dropna: - try: - if self.hasnans: - values = values[~isna(values)] - except NotImplementedError: - pass + if dropna and not isinstance(self, ABCMultiIndex): + # isna not defined for MultiIndex + if self.hasnans: + values = values[~isna(values)] return self._shallow_copy(values) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index bca6661f54900..c3b9f63c17d89 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -27,7 +27,6 @@ from pandas.core import algorithms from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin -from pandas.core.base import IndexOpsMixin import pandas.core.common as com import pandas.core.indexes.base as ibase from pandas.core.indexes.base import Index, _index_shared_docs @@ -217,10 +216,6 @@ def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): result._data._freq = freq return result - @doc(IndexOpsMixin.searchsorted, klass="Datetime-like Index") - def searchsorted(self, value, side="left", sorter=None): - return self._data.searchsorted(value, side=side, sorter=sorter) - _can_hold_na = True _na_value = NaT @@ -256,23 +251,23 @@ def min(self, axis=None, skipna=True, *args, **kwargs): return self._na_value i8 = self.asi8 - try: + + if len(i8) and self.is_monotonic_increasing: # quick check - if len(i8) and self.is_monotonic: - if i8[0] != iNaT: - return self._data._box_func(i8[0]) - - if self.hasnans: - if skipna: - min_stamp = self[~self._isnan].asi8.min() - else: - return self._na_value - else: - min_stamp = i8.min() - return self._data._box_func(min_stamp) - except ValueError: + if i8[0] != iNaT: + return self._data._box_func(i8[0]) + + if self.hasnans: + if not skipna: + return self._na_value + i8 = i8[~self._isnan] + + if not len(i8): return self._na_value + min_stamp = i8.min() + return self._data._box_func(min_stamp) + def argmin(self, axis=None, skipna=True, *args, **kwargs): """ Returns the indices of the minimum values along an axis. @@ -313,23 +308,23 @@ def max(self, axis=None, skipna=True, *args, **kwargs): return self._na_value i8 = self.asi8 - try: + + if len(i8) and self.is_monotonic: # quick check - if len(i8) and self.is_monotonic: - if i8[-1] != iNaT: - return self._data._box_func(i8[-1]) - - if self.hasnans: - if skipna: - max_stamp = self[~self._isnan].asi8.max() - else: - return self._na_value - else: - max_stamp = i8.max() - return self._data._box_func(max_stamp) - except ValueError: + if i8[-1] != iNaT: + return self._data._box_func(i8[-1]) + + if self.hasnans: + if not skipna: + return self._na_value + i8 = i8[~self._isnan] + + if not len(i8): return self._na_value + max_stamp = i8.max() + return self._data._box_func(max_stamp) + def argmax(self, axis=None, skipna=True, *args, **kwargs): """ Returns the indices of the maximum values along an axis. @@ -463,7 +458,7 @@ def _partial_date_slice( vals = self._data._ndarray unbox = self._data._unbox - if self.is_monotonic: + if self.is_monotonic_increasing: if len(self) and ( (t1 < self[0] and t2 < self[0]) or (t1 > self[-1] and t2 > self[-1])
https://api.github.com/repos/pandas-dev/pandas/pulls/37990
2020-11-22T00:02:18Z
2020-11-23T13:32:24Z
2020-11-23T13:32:24Z
2020-11-23T15:13:49Z
TST: Series construction from ExtensionDtype scalar
diff --git a/pandas/conftest.py b/pandas/conftest.py index 77e9af67590a6..12682a68fe177 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -33,8 +33,10 @@ import pandas.util._test_decorators as td +from pandas.core.dtypes.dtypes import DatetimeTZDtype, IntervalDtype + import pandas as pd -from pandas import DataFrame, Series +from pandas import DataFrame, Interval, Period, Series, Timedelta, Timestamp import pandas._testing as tm from pandas.core import ops from pandas.core.indexes.api import Index, MultiIndex @@ -687,6 +689,26 @@ def float_frame(): return DataFrame(tm.getSeriesData()) +# ---------------------------------------------------------------- +# Scalars +# ---------------------------------------------------------------- +@pytest.fixture( + params=[ + (Interval(left=0, right=5), IntervalDtype("int64")), + (Interval(left=0.1, right=0.5), IntervalDtype("float64")), + (Period("2012-01", freq="M"), "period[M]"), + (Period("2012-02-01", freq="D"), "period[D]"), + ( + Timestamp("2011-01-01", tz="US/Eastern"), + DatetimeTZDtype(tz="US/Eastern"), + ), + (Timedelta(seconds=500), "timedelta64[ns]"), + ] +) +def ea_scalar_and_dtype(request): + return request.param + + # ---------------------------------------------------------------- # Operators & Operations # ---------------------------------------------------------------- diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 27c12aa4fb3d1..d32ca454b5fb2 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -717,21 +717,12 @@ def test_constructor_period_dict(self): assert df["a"].dtype == a.dtype assert df["b"].dtype == b.dtype - @pytest.mark.parametrize( - "data,dtype", - [ - (Period("2012-01", freq="M"), "period[M]"), - (Period("2012-02-01", freq="D"), "period[D]"), - (Interval(left=0, right=5), IntervalDtype("int64")), - (Interval(left=0.1, right=0.5), IntervalDtype("float64")), - ], - ) - def test_constructor_period_dict_scalar(self, data, dtype): - # scalar periods - df = DataFrame({"a": data}, index=[0]) - assert df["a"].dtype == dtype + def test_constructor_dict_extension_scalar(self, ea_scalar_and_dtype): + ea_scalar, ea_dtype = ea_scalar_and_dtype + df = DataFrame({"a": ea_scalar}, index=[0]) + assert df["a"].dtype == ea_dtype - expected = DataFrame(index=[0], columns=["a"], data=data) + expected = DataFrame(index=[0], columns=["a"], data=ea_scalar) tm.assert_frame_equal(df, expected) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index debd516da9eec..d790a85c94193 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -9,12 +9,7 @@ from pandas._libs import iNaT, lib from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype -from pandas.core.dtypes.dtypes import ( - CategoricalDtype, - DatetimeTZDtype, - IntervalDtype, - PeriodDtype, -) +from pandas.core.dtypes.dtypes import CategoricalDtype import pandas as pd from pandas import ( @@ -95,6 +90,17 @@ def test_scalar_conversion(self): assert float(Series([1.0])) == 1.0 assert int(Series([1.0])) == 1 + def test_scalar_extension_dtype(self, ea_scalar_and_dtype): + # GH 28401 + + ea_scalar, ea_dtype = ea_scalar_and_dtype + + ser = Series(ea_scalar, index=range(3)) + expected = Series([ea_scalar] * 3, dtype=ea_dtype) + + assert ser.dtype == ea_dtype + tm.assert_series_equal(ser, expected) + def test_constructor(self, datetime_series): with 
tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False): empty_series = Series() @@ -1107,23 +1113,13 @@ def test_constructor_dict_order(self): expected = Series([1, 0, 2], index=list("bac")) tm.assert_series_equal(result, expected) - @pytest.mark.parametrize( - "data,dtype", - [ - (Period("2020-01"), PeriodDtype("M")), - (Interval(left=0, right=5), IntervalDtype("int64")), - ( - Timestamp("2011-01-01", tz="US/Eastern"), - DatetimeTZDtype(tz="US/Eastern"), - ), - ], - ) - def test_constructor_dict_extension(self, data, dtype): - d = {"a": data} + def test_constructor_dict_extension(self, ea_scalar_and_dtype): + ea_scalar, ea_dtype = ea_scalar_and_dtype + d = {"a": ea_scalar} result = Series(d, index=["a"]) - expected = Series(data, index=["a"], dtype=dtype) + expected = Series(ea_scalar, index=["a"], dtype=ea_dtype) - assert result.dtype == dtype + assert result.dtype == ea_dtype tm.assert_series_equal(result, expected)
- [x] closes #28401 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
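The behavior under test, as a standalone sketch (scalars taken from the new fixture):

```python
import pandas as pd

# Broadcasting an extension-dtype scalar over the index should infer
# the matching extension dtype rather than object.
ser = pd.Series(pd.Timestamp("2011-01-01", tz="US/Eastern"), index=range(3))
print(ser.dtype)  # datetime64[ns, US/Eastern]

ser = pd.Series(pd.Interval(left=0, right=5), index=range(3))
print(ser.dtype)  # interval[int64]
```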
https://api.github.com/repos/pandas-dev/pandas/pulls/37989
2020-11-21T19:48:15Z
2020-11-24T13:33:07Z
2020-11-24T13:33:06Z
2020-11-27T05:08:52Z
TST: add test to verify column does not lose categorical type when using loc
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 28846bcf2f14d..9dbae874c1a93 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -12,6 +12,7 @@ import pandas as pd from pandas import ( + Categorical, CategoricalIndex, DataFrame, Index, @@ -1285,6 +1286,13 @@ def test_loc_setitem_datetime_keys_cast(self): expected = DataFrame({"one": [100.0, 200.0]}, index=[dt1, dt2]) tm.assert_frame_equal(df, expected) + def test_loc_setitem_categorical_column_retains_dtype(self, ordered): + # GH16360 + result = DataFrame({"A": [1]}) + result.loc[:, "B"] = Categorical(["b"], ordered=ordered) + expected = DataFrame({"A": [1], "B": Categorical(["b"], ordered=ordered)}) + tm.assert_frame_equal(result, expected) + class TestLocCallable: def test_frame_loc_getitem_callable(self):
- [x] closes #16360 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry (not applicable) Added a test to verify the behavior described in issue #16360. However, I'm not 100% sure about the location of the test.
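The verified behavior, as a standalone sketch of the test above:

```python
import pandas as pd

df = pd.DataFrame({"A": [1]})
df.loc[:, "B"] = pd.Categorical(["b"], ordered=True)

# The assigned column keeps its categorical dtype instead of being
# coerced to object.
print(df["B"].dtype)  # category
```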
https://api.github.com/repos/pandas-dev/pandas/pulls/37988
2020-11-21T18:26:51Z
2020-11-23T18:59:08Z
2020-11-23T18:59:08Z
2020-11-23T20:11:24Z
CLN/TST: delegate StringArray.fillna() to parent class + add tests
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 3b297e7c2b13b..e75305e55348c 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -282,10 +282,6 @@ def __setitem__(self, key, value): super().__setitem__(key, value) - def fillna(self, value=None, method=None, limit=None): - # TODO: validate dtype - return super().fillna(value, method, limit) - def astype(self, dtype, copy=True): dtype = pandas_dtype(dtype) if isinstance(dtype, StringDtype): diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index 07e9484994c26..9a1634380aaba 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -422,6 +422,24 @@ def test_reduce_missing(skipna, dtype): assert pd.isna(result) +def test_fillna_args(): + # GH 37987 + + arr = pd.array(["a", pd.NA], dtype="string") + + res = arr.fillna(value="b") + expected = pd.array(["a", "b"], dtype="string") + tm.assert_extension_array_equal(res, expected) + + res = arr.fillna(value=np.str_("b")) + expected = pd.array(["a", "b"], dtype="string") + tm.assert_extension_array_equal(res, expected) + + msg = "Cannot set non-string value '1' into a StringArray." + with pytest.raises(ValueError, match=msg): + arr.fillna(value=1) + + @td.skip_if_no("pyarrow", min_version="0.15.0") def test_arrow_array(dtype): # protocol added in 0.15.0
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
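The covered behavior, sketched standalone (mirrors the new test):

```python
import pandas as pd

arr = pd.array(["a", pd.NA], dtype="string")

print(arr.fillna("b"))  # <StringArray> ['a', 'b']

# Validation still happens via StringArray.__setitem__, so non-string
# fill values are rejected as before.
try:
    arr.fillna(1)
except ValueError as err:
    print(err)  # Cannot set non-string value '1' into a StringArray.
```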
https://api.github.com/repos/pandas-dev/pandas/pulls/37987
2020-11-21T03:28:28Z
2020-11-26T16:41:38Z
2020-11-26T16:41:38Z
2020-11-26T16:41:42Z
REGR: fix inplace operations for EAs with non-EA arg
diff --git a/doc/source/whatsnew/v1.1.5.rst b/doc/source/whatsnew/v1.1.5.rst index 323342cb43950..609c3650c8cc2 100644 --- a/doc/source/whatsnew/v1.1.5.rst +++ b/doc/source/whatsnew/v1.1.5.rst @@ -17,7 +17,7 @@ Fixed regressions - Regression in addition of a timedelta-like scalar to a :class:`DatetimeIndex` raising incorrectly (:issue:`37295`) - Fixed regression in :meth:`Series.groupby` raising when the :class:`Index` of the :class:`Series` had a tuple as its name (:issue:`37755`) - Fixed regression in :meth:`DataFrame.loc` and :meth:`Series.loc` for ``__setitem__`` when one-dimensional tuple was given to select from :class:`MultiIndex` (:issue:`37711`) -- +- Fixed regression in inplace operations on :class:`Series` with ``ExtensionDtype`` with NumPy dtyped operand (:issue:`37910`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 3aa692c5d3d43..e2b3406c6b1c5 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -70,6 +70,7 @@ is_datetime64_any_dtype, is_datetime64tz_dtype, is_dict_like, + is_dtype_equal, is_extension_array_dtype, is_float, is_list_like, @@ -11266,7 +11267,11 @@ def _inplace_method(self, other, op): """ result = op(self, other) - if self.ndim == 1 and result._indexed_same(self) and result.dtype == self.dtype: + if ( + self.ndim == 1 + and result._indexed_same(self) + and is_dtype_equal(result.dtype, self.dtype) + ): # GH#36498 this inplace op can _actually_ be inplace. self._values[:] = result._values return self diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index 6aad2cadf78ba..c5196cea5d3bb 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -832,6 +832,40 @@ def test_scalarop_preserve_name(self, datetime_series): assert result.name == datetime_series.name +class TestInplaceOperations: + @pytest.mark.parametrize( + "dtype1, dtype2, dtype_expected, dtype_mul", + ( + ("Int64", "Int64", "Int64", "Int64"), + ("float", "float", "float", "float"), + ("Int64", "float", "float", "float"), + pytest.param( + "Int64", + "Float64", + "Float64", + "Float64", + marks=pytest.mark.xfail(reason="Not implemented yet"), + ), + ), + ) + def test_series_inplace_ops(self, dtype1, dtype2, dtype_expected, dtype_mul): + # GH 37910 + + ser1 = Series([1], dtype=dtype1) + ser2 = Series([2], dtype=dtype2) + ser1 += ser2 + expected = Series([3], dtype=dtype_expected) + tm.assert_series_equal(ser1, expected) + + ser1 -= ser2 + expected = Series([1], dtype=dtype_expected) + tm.assert_series_equal(ser1, expected) + + ser1 *= ser2 + expected = Series([2], dtype=dtype_mul) + tm.assert_series_equal(ser1, expected) + + def test_none_comparison(series_with_simple_index): series = series_with_simple_index if isinstance(series.index, IntervalIndex):
- [x] closes #37910 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
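For context, a minimal sketch (not part of the diff) of the behavior the new tests pin down, using only the public pandas API:

```
import numpy as np
import pandas as pd
from pandas.api.types import is_dtype_equal

# is_dtype_equal reports inequality instead of raising, which is what the
# fixed check in _inplace_method relies on when an extension dtype meets a
# NumPy dtype.
print(is_dtype_equal(pd.Int64Dtype(), np.dtype("float64")))  # False

# The regression scenario: a mixed-dtype inplace op now falls back to
# rebinding the result instead of raising.
ser = pd.Series([1], dtype="Int64")
ser += pd.Series([2], dtype="float")
print(ser.dtype)  # float64
```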
https://api.github.com/repos/pandas-dev/pandas/pulls/37986
2020-11-21T03:01:47Z
2020-11-24T00:24:01Z
2020-11-24T00:24:01Z
2020-11-24T09:37:47Z
CLN: make MultiIndex._shallow_copy signature match other subclasses
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 5eb890c9817c0..7658230d9e1dd 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3986,7 +3986,11 @@ def _join_monotonic(self, other, how="left", return_indexers=False): else: return join_index - def _wrap_joined_index(self, joined, other): + def _wrap_joined_index( + self: _IndexT, joined: np.ndarray, other: _IndexT + ) -> _IndexT: + assert other.dtype == self.dtype + if isinstance(self, ABCMultiIndex): name = self.names if self.names == other.names else None else: @@ -4188,7 +4192,7 @@ def _is_memory_usage_qualified(self) -> bool: """ return self.is_object() - def is_type_compatible(self, kind) -> bool: + def is_type_compatible(self, kind: str_t) -> bool: """ Whether the index type is compatible with the provided type. """ diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index f0b37b810b28a..bca6661f54900 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -403,6 +403,36 @@ def _format_attrs(self): attrs.append(("freq", freq)) return attrs + def _summary(self, name=None) -> str: + """ + Return a summarized representation. + + Parameters + ---------- + name : str + Name to use in the summary representation. + + Returns + ------- + str + Summarized representation of the index. + """ + formatter = self._formatter_func + if len(self) > 0: + index_summary = f", {formatter(self[0])} to {formatter(self[-1])}" + else: + index_summary = "" + + if name is None: + name = type(self).__name__ + result = f"{name}: {len(self)} entries{index_summary}" + if self.freq: + result += f"\nFreq: {self.freqstr}" + + # display as values, not quoted + result = result.replace("'", "") + return result + # -------------------------------------------------------------------- # Indexing Methods @@ -507,36 +537,6 @@ def where(self, cond, other=None): arr = self._data._from_backing_data(result) return type(self)._simple_new(arr, name=self.name) - def _summary(self, name=None) -> str: - """ - Return a summarized representation. - - Parameters - ---------- - name : str - Name to use in the summary representation. - - Returns - ------- - str - Summarized representation of the index. - """ - formatter = self._formatter_func - if len(self) > 0: - index_summary = f", {formatter(self[0])} to {formatter(self[-1])}" - else: - index_summary = "" - - if name is None: - name = type(self).__name__ - result = f"{name}: {len(self)} entries{index_summary}" - if self.freq: - result += f"\nFreq: {self.freqstr}" - - # display as values, not quoted - result = result.replace("'", "") - return result - def shift(self, periods=1, freq=None): """ Shift index by desired number of time frequency increments. 
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 4bafda9c0a611..1dd3eb1017eca 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -814,8 +814,8 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None): # -------------------------------------------------------------------- - def is_type_compatible(self, typ) -> bool: - return typ == self.inferred_type or typ == "datetime" + def is_type_compatible(self, kind: str) -> bool: + return kind == self.inferred_type or kind == "datetime" @property def inferred_type(self) -> str: diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 9eb34d920a328..7a37154035ab2 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1065,34 +1065,19 @@ def _engine(self): @property def _constructor(self): - return MultiIndex.from_tuples + return type(self).from_tuples @doc(Index._shallow_copy) - def _shallow_copy( - self, - values=None, - name=lib.no_default, - levels=None, - codes=None, - sortorder=None, - names=lib.no_default, - ): - if names is not lib.no_default and name is not lib.no_default: - raise TypeError("Can only provide one of `names` and `name`") - elif names is lib.no_default: - names = name if name is not lib.no_default else self.names + def _shallow_copy(self, values=None, name=lib.no_default): + names = name if name is not lib.no_default else self.names if values is not None: - assert levels is None and codes is None - return MultiIndex.from_tuples(values, sortorder=sortorder, names=names) + return type(self).from_tuples(values, sortorder=None, names=names) - levels = levels if levels is not None else self.levels - codes = codes if codes is not None else self.codes - - result = MultiIndex( - levels=levels, - codes=codes, - sortorder=sortorder, + result = type(self)( + levels=self.levels, + codes=self.codes, + sortorder=None, names=names, verify_integrity=False, ) @@ -1100,18 +1085,6 @@ def _shallow_copy( result._cache.pop("levels", None) # GH32669 return result - def symmetric_difference(self, other, result_name=None, sort=None): - # On equal symmetric_difference MultiIndexes the difference is empty. - # Therefore, an empty MultiIndex is returned GH13490 - tups = Index.symmetric_difference(self, other, result_name, sort) - if len(tups) == 0: - return MultiIndex( - levels=[[] for _ in range(self.nlevels)], - codes=[[] for _ in range(self.nlevels)], - names=tups.name, - ) - return type(self).from_tuples(tups, names=tups.name) - # -------------------------------------------------------------------- def copy( @@ -1177,12 +1150,18 @@ def copy( if codes is None: codes = deepcopy(self.codes) - new_index = self._shallow_copy( + levels = levels if levels is not None else self.levels + codes = codes if codes is not None else self.codes + + new_index = type(self)( levels=levels, codes=codes, - names=names, sortorder=self.sortorder, + names=names, + verify_integrity=False, ) + new_index._cache = self._cache.copy() + new_index._cache.pop("levels", None) # GH32669 if dtype: warnings.warn( @@ -3612,6 +3591,18 @@ def _convert_can_do_setop(self, other): return other, result_names + def symmetric_difference(self, other, result_name=None, sort=None): + # On equal symmetric_difference MultiIndexes the difference is empty. 
+ # Therefore, an empty MultiIndex is returned GH13490 + tups = Index.symmetric_difference(self, other, result_name, sort) + if len(tups) == 0: + return type(self)( + levels=[[] for _ in range(self.nlevels)], + codes=[[] for _ in range(self.nlevels)], + names=tups.name, + ) + return type(self).from_tuples(tups, names=tups.name) + # -------------------------------------------------------------------- @doc(Index.astype) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 28e3aa69f0bb5..e25119162368f 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -250,7 +250,7 @@ def __new__( @property def values(self) -> np.ndarray: - return np.asarray(self) + return np.asarray(self, dtype=object) def _maybe_convert_timedelta(self, other): """ diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 6ae10ad2f5da2..27e090f450cd8 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -229,8 +229,8 @@ def _maybe_cast_slice_bound(self, label, side: str, kind): # ------------------------------------------------------------------- - def is_type_compatible(self, typ) -> bool: - return typ == self.inferred_type or typ == "timedelta" + def is_type_compatible(self, kind: str) -> bool: + return kind == self.inferred_type or kind == "timedelta" @property def inferred_type(self) -> str:
Adds annotations and moves MultiIndex.symmetric_difference so it sits adjacent to the other set operations.
https://api.github.com/repos/pandas-dev/pandas/pulls/37985
2020-11-21T02:22:27Z
2020-11-21T21:29:36Z
2020-11-21T21:29:35Z
2020-11-21T21:32:12Z
BUG: IntervalArray.astype(categorical_dtype) losing ordered
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index bc5229d4b4296..cda1efeeb1b2a 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -589,6 +589,7 @@ Interval - Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` where :class:`Interval` dtypes would be converted to object dtypes (:issue:`34871`) - Bug in :meth:`IntervalIndex.take` with negative indices and ``fill_value=None`` (:issue:`37330`) - Bug in :meth:`IntervalIndex.putmask` with datetime-like dtype incorrectly casting to object dtype (:issue:`37968`) +- Bug in :meth:`IntervalArray.astype` incorrectly dropping dtype information with a :class:`CategoricalDtype` object (:issue:`37984`) - Indexing diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index d007bb112c86c..2b719c717e624 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -702,7 +702,7 @@ def astype(self, dtype, copy=True): combined = _get_combined_data(new_left, new_right) return type(self)._simple_new(combined, closed=self.closed) elif is_categorical_dtype(dtype): - return Categorical(np.asarray(self)) + return Categorical(np.asarray(self), dtype=dtype) elif isinstance(dtype, StringDtype): return dtype.construct_array_type()._from_sequence(self, copy=False) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index b1b3c594512b1..01381789a68a9 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -373,9 +373,7 @@ def __reduce__(self): def astype(self, dtype, copy: bool = True): with rewrite_exception("IntervalArray", type(self).__name__): new_values = self._values.astype(dtype, copy=copy) - if is_interval_dtype(new_values.dtype): - return self._shallow_copy(new_values) - return Index.astype(self, dtype, copy=copy) + return Index(new_values, dtype=new_values.dtype, name=self.name) @property def inferred_type(self) -> str: diff --git a/pandas/tests/arrays/interval/test_astype.py b/pandas/tests/arrays/interval/test_astype.py new file mode 100644 index 0000000000000..e118e40196e43 --- /dev/null +++ b/pandas/tests/arrays/interval/test_astype.py @@ -0,0 +1,23 @@ +import pytest + +from pandas import Categorical, CategoricalDtype, Index, IntervalIndex +import pandas._testing as tm + + +class TestAstype: + @pytest.mark.parametrize("ordered", [True, False]) + def test_astype_categorical_retains_ordered(self, ordered): + index = IntervalIndex.from_breaks(range(5)) + arr = index._data + + dtype = CategoricalDtype(None, ordered=ordered) + + expected = Categorical(list(arr), ordered=ordered) + result = arr.astype(dtype) + assert result.ordered is ordered + tm.assert_categorical_equal(result, expected) + + # test IntervalIndex.astype while we're at it. + result = index.astype(dtype) + expected = Index(expected) + tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/interval/test_astype.py b/pandas/tests/indexes/interval/test_astype.py index c94af6c0d533e..7bf1ea7355b61 100644 --- a/pandas/tests/indexes/interval/test_astype.py +++ b/pandas/tests/indexes/interval/test_astype.py @@ -114,7 +114,13 @@ def test_subtype_integer_errors(self): # int64 -> uint64 fails with negative values index = interval_range(-10, 10) dtype = IntervalDtype("uint64") - with pytest.raises(ValueError): + + # Until we decide what the exception message _should_ be, we + # assert something that it should _not_ be. 
+ # We should _not_ be getting a message suggesting that the -10 + # has been wrapped around to a large-positive integer + msg = "^(?!(left side of interval must be <= right side))" + with pytest.raises(ValueError, match=msg): index.astype(dtype)
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
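An aside on the regex used in `test_subtype_integer_errors` above: `pytest.raises(..., match=...)` applies the pattern with `re.search`, so an anchored negative lookahead can assert what an error message must *not* start with. A self-contained sketch of the same trick (the raised message here is made up for illustration):

```
import pytest

# "^(?!(...))" matches any message that does not begin with the quoted text.
pattern = "^(?!(left side of interval must be <= right side))"

with pytest.raises(ValueError, match=pattern):
    raise ValueError("some other error message")
```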
https://api.github.com/repos/pandas-dev/pandas/pulls/37984
2020-11-21T00:00:09Z
2020-11-22T14:00:54Z
2020-11-22T14:00:54Z
2020-11-22T15:32:00Z
CLN: remove panel compat shim
diff --git a/ci/deps/travis-37-locale.yaml b/ci/deps/travis-37-locale.yaml index e93a86910bf34..4e442b10482a7 100644 --- a/ci/deps/travis-37-locale.yaml +++ b/ci/deps/travis-37-locale.yaml @@ -34,7 +34,7 @@ dependencies: - pyarrow>=0.17 - pytables>=3.5.1 - scipy - - xarray=0.12.0 + - xarray=0.12.3 - xlrd - xlsxwriter - xlwt diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index df481e8c986f7..c823ad01f10bf 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -284,7 +284,7 @@ pyxlsb 1.0.6 Reading for xlsb files qtpy Clipboard I/O s3fs 0.4.0 Amazon S3 access tabulate 0.8.3 Printing in Markdown-friendly format (see `tabulate`_) -xarray 0.12.0 pandas-like API for N-dimensional data +xarray 0.12.3 pandas-like API for N-dimensional data xclip Clipboard I/O on linux xlrd 1.2.0 Excel reading xlwt 1.3.0 Excel writing diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst index 9d5649c37e92f..f7c5eaf242b34 100644 --- a/doc/source/reference/index.rst +++ b/doc/source/reference/index.rst @@ -30,7 +30,6 @@ public functions related to data types in pandas. series frame arrays - panel indexing offset_frequency window diff --git a/doc/source/reference/panel.rst b/doc/source/reference/panel.rst deleted file mode 100644 index 37d48c2dadf2e..0000000000000 --- a/doc/source/reference/panel.rst +++ /dev/null @@ -1,10 +0,0 @@ -{{ header }} - -.. _api.panel: - -===== -Panel -===== -.. currentmodule:: pandas - -``Panel`` was removed in 0.25.0. For prior documentation, see the `0.24 documentation <https://pandas.pydata.org/pandas-docs/version/0.24/reference/panel.html>`_ diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index bc5229d4b4296..08e379ad2b413 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -657,6 +657,7 @@ I/O - Parse missing values using :func:`read_json` with ``dtype=False`` to ``NaN`` instead of ``None`` (:issue:`28501`) - :meth:`read_fwf` was inferring compression with ``compression=None`` which was not consistent with the other :meth:``read_*`` functions (:issue:`37909`) - :meth:`DataFrame.to_html` was ignoring ``formatters`` argument for ``ExtensionDtype`` columns (:issue:`36525`) +- Bumped minimum xarray version to 0.12.3 to avoid reference to the removed ``Panel`` class (:issue:`27101`) Period ^^^^^^ diff --git a/pandas/__init__.py b/pandas/__init__.py index b9b7d5d064855..cc5d835a52833 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -189,25 +189,10 @@ # GH 27101 -# TODO: remove Panel compat in 1.0 def __getattr__(name): import warnings - if name == "Panel": - - warnings.warn( - "The Panel class is removed from pandas. Accessing it " - "from the top-level namespace will also be removed in the next version", - FutureWarning, - stacklevel=2, - ) - - class Panel: - pass - - return Panel - - elif name == "datetime": + if name == "datetime": warnings.warn( "The pandas.datetime class is deprecated " "and will be removed from pandas in a future version. 
" diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py index d3c7888cac704..533e67acfa2f4 100644 --- a/pandas/compat/_optional.py +++ b/pandas/compat/_optional.py @@ -25,7 +25,7 @@ "sqlalchemy": "1.2.8", "tables": "3.5.1", "tabulate": "0.8.3", - "xarray": "0.12.0", + "xarray": "0.12.3", "xlrd": "1.2.0", "xlwt": "1.3.0", "xlsxwriter": "1.0.2", diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index 392be699b6fc0..83016a08de90b 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -41,7 +41,6 @@ def test_dask(df): assert ddf.compute() is not None -@pytest.mark.filterwarnings("ignore:Panel class is removed") def test_xarray(df): xarray = import_module("xarray") # noqa diff --git a/scripts/validate_rst_title_capitalization.py b/scripts/validate_rst_title_capitalization.py index b8839c83d00b9..d521f2ee421be 100755 --- a/scripts/validate_rst_title_capitalization.py +++ b/scripts/validate_rst_title_capitalization.py @@ -138,7 +138,6 @@ "Google", "CategoricalDtype", "UTC", - "Panel", "False", "Styler", "os",
remove panel shim
https://api.github.com/repos/pandas-dev/pandas/pulls/37983
2020-11-20T23:00:54Z
2020-11-23T13:34:25Z
2020-11-23T13:34:25Z
2020-12-05T18:51:15Z
DEPR: how keyword in PeriodIndex.astype
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index ecea79be5b4dc..c8623c047980d 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -476,6 +476,7 @@ Deprecations - :meth:`Categorical.is_dtype_equal` and :meth:`CategoricalIndex.is_dtype_equal` are deprecated, will be removed in a future version (:issue:`37545`) - :meth:`Series.slice_shift` and :meth:`DataFrame.slice_shift` are deprecated, use :meth:`Series.shift` or :meth:`DataFrame.shift` instead (:issue:`37601`) - Partial slicing on unordered :class:`DatetimeIndex` with keys, which are not in Index is deprecated and will be removed in a future version (:issue:`18531`) +- The ``how`` keyword in :meth:`PeriodIndex.astype` is deprecated and will be removed in a future version, use ``index.to_timestamp(how=how)`` instead (:issue:`37982`) - Deprecated :meth:`Index.asi8` for :class:`Index` subclasses other than :class:`DatetimeIndex`, :class:`TimedeltaIndex`, and :class:`PeriodIndex` (:issue:`37877`) - The ``inplace`` parameter of :meth:`Categorical.remove_unused_categories` is deprecated and will be removed in a future version (:issue:`37643`) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 38abc18b5f1cb..28e3aa69f0bb5 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -1,9 +1,10 @@ from datetime import datetime, timedelta from typing import Any, cast +import warnings import numpy as np -from pandas._libs import index as libindex +from pandas._libs import index as libindex, lib from pandas._libs.tslibs import BaseOffset, Period, Resolution, Tick from pandas._libs.tslibs.parsing import DateParseError, parse_time_string from pandas._typing import DtypeObj @@ -376,15 +377,26 @@ def asof_locs(self, where: Index, mask: np.ndarray) -> np.ndarray: return super().asof_locs(where, mask) @doc(Index.astype) - def astype(self, dtype, copy: bool = True, how="start"): + def astype(self, dtype, copy: bool = True, how=lib.no_default): dtype = pandas_dtype(dtype) + if how is not lib.no_default: + # GH#37982 + warnings.warn( + "The 'how' keyword in PeriodIndex.astype is deprecated and " + "will be removed in a future version. " + "Use index.to_timestamp(how=how) instead", + FutureWarning, + stacklevel=2, + ) + else: + how = "start" + if is_datetime64_any_dtype(dtype): # 'how' is index-specific, isn't part of the EA interface. tz = getattr(dtype, "tz", None) return self.to_timestamp(how=how).tz_localize(tz) - # TODO: should probably raise on `how` here, so we don't ignore it. 
return super().astype(dtype, copy=copy) @property diff --git a/pandas/tests/indexes/period/test_astype.py b/pandas/tests/indexes/period/test_astype.py index fa1617bdfaa52..674d09c6a7a8c 100644 --- a/pandas/tests/indexes/period/test_astype.py +++ b/pandas/tests/indexes/period/test_astype.py @@ -144,13 +144,17 @@ def test_period_astype_to_timestamp(self): pi = PeriodIndex(["2011-01", "2011-02", "2011-03"], freq="M") exp = DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], freq="MS") - res = pi.astype("datetime64[ns]") + with tm.assert_produces_warning(FutureWarning): + # how keyword deprecated GH#37982 + res = pi.astype("datetime64[ns]", how="start") tm.assert_index_equal(res, exp) assert res.freq == exp.freq exp = DatetimeIndex(["2011-01-31", "2011-02-28", "2011-03-31"]) exp = exp + Timedelta(1, "D") - Timedelta(1, "ns") - res = pi.astype("datetime64[ns]", how="end") + with tm.assert_produces_warning(FutureWarning): + # how keyword deprecated GH#37982 + res = pi.astype("datetime64[ns]", how="end") tm.assert_index_equal(res, exp) assert res.freq == exp.freq @@ -161,6 +165,8 @@ def test_period_astype_to_timestamp(self): exp = DatetimeIndex(["2011-01-31", "2011-02-28", "2011-03-31"], tz="US/Eastern") exp = exp + Timedelta(1, "D") - Timedelta(1, "ns") - res = pi.astype("datetime64[ns, US/Eastern]", how="end") + with tm.assert_produces_warning(FutureWarning): + # how keyword deprecated GH#37982 + res = pi.astype("datetime64[ns, US/Eastern]", how="end") tm.assert_index_equal(res, exp) assert res.freq == exp.freq
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
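The diff uses `lib.no_default` to distinguish "the caller omitted `how`" from "the caller passed the old default". A minimal, library-free sketch of that sentinel pattern (the names here are illustrative, not pandas internals):

```
import warnings

_no_default = object()  # sentinel distinct from every real argument


def astype(dtype, how=_no_default):
    if how is not _no_default:
        warnings.warn("the 'how' keyword is deprecated", FutureWarning, stacklevel=2)
    else:
        how = "start"  # old default, applied only when the user omitted it
    return dtype, how


astype("datetime64[ns]")             # no warning
astype("datetime64[ns]", how="end")  # emits FutureWarning
```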
https://api.github.com/repos/pandas-dev/pandas/pulls/37982
2020-11-20T22:34:35Z
2020-11-20T23:57:10Z
2020-11-20T23:57:10Z
2020-11-21T00:00:49Z
Deprecate inplace in Categorical.remove_categories
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index a286d152f03c3..c4e290f67e5b9 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -607,6 +607,7 @@ Deprecations - Deprecated using :func:`merge` or :func:`join` on a different number of levels (:issue:`34862`) - Deprecated the use of ``**kwargs`` in :class:`.ExcelWriter`; use the keyword argument ``engine_kwargs`` instead (:issue:`40430`) - Deprecated the ``level`` keyword for :class:`DataFrame` and :class:`Series` aggregations; use groupby instead (:issue:`39983`) +- The ``inplace`` parameter of :meth:`Categorical.remove_categories` is deprecated and will be removed in a future version (:issue:`37643`) - Deprecated :func:`merge` producing duplicated columns through the ``suffixes`` keyword and already existing columns (:issue:`22818`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index c51e25776e1c2..853b1b38a444b 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -12,7 +12,11 @@ Union, cast, ) -from warnings import warn +from warnings import ( + catch_warnings, + simplefilter, + warn, +) import numpy as np @@ -1122,7 +1126,7 @@ def add_categories(self, new_categories, inplace=False): if not inplace: return cat - def remove_categories(self, removals, inplace=False): + def remove_categories(self, removals, inplace=no_default): """ Remove the specified categories. @@ -1137,6 +1141,8 @@ def remove_categories(self, removals, inplace=False): Whether or not to remove the categories inplace or return a copy of this categorical with removed categories. + .. deprecated:: 1.3.0 + Returns ------- cat : Categorical or None @@ -1155,6 +1161,18 @@ def remove_categories(self, removals, inplace=False): remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. """ + if inplace is not no_default: + warn( + "The `inplace` parameter in pandas.Categorical." + "remove_categories is deprecated and will be removed in " + "a future version. 
Removing unused categories will always " + "return a new Categorical object.", + FutureWarning, + stacklevel=2, + ) + else: + inplace = False + inplace = validate_bool_kwarg(inplace, "inplace") if not is_list_like(removals): removals = [removals] @@ -2355,14 +2373,20 @@ def replace(self, to_replace, value, inplace: bool = False): continue if replace_value in cat.categories: if isna(new_value): - cat.remove_categories(replace_value, inplace=True) + with catch_warnings(): + simplefilter("ignore") + cat.remove_categories(replace_value, inplace=True) continue + categories = cat.categories.tolist() index = categories.index(replace_value) + if new_value in cat.categories: value_index = categories.index(new_value) cat._codes[cat._codes == index] = value_index - cat.remove_categories(replace_value, inplace=True) + with catch_warnings(): + simplefilter("ignore") + cat.remove_categories(replace_value, inplace=True) else: categories[index] = new_value cat.rename_categories(categories, inplace=True) diff --git a/pandas/tests/arrays/categorical/test_analytics.py b/pandas/tests/arrays/categorical/test_analytics.py index 56d474497a166..37f04e5d30a66 100644 --- a/pandas/tests/arrays/categorical/test_analytics.py +++ b/pandas/tests/arrays/categorical/test_analytics.py @@ -326,7 +326,9 @@ def test_validate_inplace_raises(self, value): cat.add_categories(new_categories=["D", "E", "F"], inplace=value) with pytest.raises(ValueError, match=msg): - cat.remove_categories(removals=["D", "E", "F"], inplace=value) + with tm.assert_produces_warning(FutureWarning): + # issue #37643 inplace kwarg deprecated + cat.remove_categories(removals=["D", "E", "F"], inplace=value) with pytest.raises(ValueError, match=msg): with tm.assert_produces_warning(FutureWarning): diff --git a/pandas/tests/arrays/categorical/test_api.py b/pandas/tests/arrays/categorical/test_api.py index a6dea639488a2..b6719d61ffc3c 100644 --- a/pandas/tests/arrays/categorical/test_api.py +++ b/pandas/tests/arrays/categorical/test_api.py @@ -354,7 +354,10 @@ def test_remove_categories(self): tm.assert_categorical_equal(res, new) # inplace == True - res = cat.remove_categories("c", inplace=True) + with tm.assert_produces_warning(FutureWarning): + # issue #37643 inplace kwarg deprecated + res = cat.remove_categories("c", inplace=True) + tm.assert_categorical_equal(cat, new) assert res is None
- [x] xref #37643 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
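The changes to `replace` above silence the library's own deprecation warning where it still calls the deprecated path internally. A minimal sketch of that suppression pattern with the standard library:

```
import warnings


def deprecated_call():
    warnings.warn("inplace is deprecated", FutureWarning, stacklevel=2)


# Internal callers can keep using the old path without surfacing the
# warning to end users.
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    deprecated_call()  # the FutureWarning never reaches the caller
```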
https://api.github.com/repos/pandas-dev/pandas/pulls/37981
2020-11-20T21:53:57Z
2021-04-21T13:05:10Z
2021-04-21T13:05:10Z
2021-04-21T13:05:14Z
DOC: Fix typo
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8b71ff83400d1..3aa692c5d3d43 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -11830,7 +11830,7 @@ def _doc_parms(cls): _any_desc = """\ Return whether any element is True, potentially over an axis. -Returns False unless there at least one element within a series or +Returns False unless there is at least one element within a series or along a Dataframe axis that is True or equivalent (e.g. non-zero or non-empty)."""
https://api.github.com/repos/pandas-dev/pandas/pulls/37980
2020-11-20T18:57:30Z
2020-11-20T22:46:02Z
2020-11-20T22:46:02Z
2020-11-21T00:24:45Z
BUG: CategoricalIndex.where nulling out non-categories
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index bc5229d4b4296..f59155c595af4 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -518,6 +518,8 @@ Categorical - :meth:`Categorical.fillna` will always return a copy, will validate a passed fill value regardless of whether there are any NAs to fill, and will disallow a ``NaT`` as a fill value for numeric categories (:issue:`36530`) - Bug in :meth:`Categorical.__setitem__` that incorrectly raised when trying to set a tuple value (:issue:`20439`) - Bug in :meth:`CategoricalIndex.equals` incorrectly casting non-category entries to ``np.nan`` (:issue:`37667`) +- Bug in :meth:`CategoricalIndex.where` incorrectly setting non-category entries to ``np.nan`` instead of raising ``TypeError`` (:issue:`37977`) +- Datetimelike ^^^^^^^^^^^^ diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index b40531bd42af8..5cc6525dc3c9b 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -321,3 +321,22 @@ def putmask(self, mask, value): value = self._validate_setitem_value(value) np.putmask(self._ndarray, mask, value) + + def where(self, mask, value): + """ + Analogue to np.where(mask, self, value) + + Parameters + ---------- + mask : np.ndarray[bool] + value : scalar or listlike + + Raises + ------ + TypeError + If value cannot be cast to self.dtype. + """ + value = self._validate_setitem_value(value) + + res_values = np.where(mask, self._ndarray, value) + return self._from_backing_data(res_values) diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 413c8f6b45275..e2507aeaeb652 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -403,18 +403,6 @@ def _to_safe_for_reshape(self): """ convert to object if we are a categorical """ return self.astype("object") - @doc(Index.where) - def where(self, cond, other=None): - # TODO: Investigate an alternative implementation with - # 1. copy the underlying Categorical - # 2. setitem with `cond` and `other` - # 3. Rebuild CategoricalIndex. - if other is None: - other = self._na_value - values = np.where(cond, self._values, other) - cat = Categorical(values, dtype=self.dtype) - return type(self)._simple_new(cat, name=self.name) - def reindex(self, target, method=None, level=None, limit=None, tolerance=None): """ Create index with target's values (move/add/delete values as necessary) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index bca6661f54900..40e27709df841 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -529,14 +529,6 @@ def isin(self, values, level=None): return algorithms.isin(self.asi8, values.asi8) - @Appender(Index.where.__doc__) - def where(self, cond, other=None): - other = self._data._validate_setitem_value(other) - - result = np.where(cond, self._data._ndarray, other) - arr = self._data._from_backing_data(result) - return type(self)._simple_new(arr, name=self.name) - def shift(self, periods=1, freq=None): """ Shift index by desired number of time frequency increments. 
diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index 0aa4b7732c048..6c35b882b5d67 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -378,6 +378,11 @@ def insert(self, loc: int, item): new_arr = arr._from_backing_data(new_vals) return type(self)._simple_new(new_arr, name=self.name) + @doc(Index.where) + def where(self, cond, other=None): + res_values = self._data.where(cond, other) + return type(self)._simple_new(res_values, name=self.name) + def putmask(self, mask, value): res_values = self._data.copy() try: diff --git a/pandas/tests/indexes/categorical/test_indexing.py b/pandas/tests/indexes/categorical/test_indexing.py index cf9360821d37f..617ffdb48b3b7 100644 --- a/pandas/tests/indexes/categorical/test_indexing.py +++ b/pandas/tests/indexes/categorical/test_indexing.py @@ -290,6 +290,18 @@ def test_where(self, klass): result = i.where(klass(cond)) tm.assert_index_equal(result, expected) + def test_where_non_categories(self): + ci = CategoricalIndex(["a", "b", "c", "d"]) + mask = np.array([True, False, True, False]) + + msg = "Cannot setitem on a Categorical with a new category" + with pytest.raises(ValueError, match=msg): + ci.where(mask, 2) + + with pytest.raises(ValueError, match=msg): + # Test the Categorical method directly + ci._data.where(mask, 2) + class TestContains: def test_contains(self):
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry ATM CategoricalIndex.where has some idiosyncratic behavior: ``` ci = CategoricalIndex(["a", "b", "c", "d"]) mask = np.array([True, False, True, False]) >>> ci.where(mask, 2) CategoricalIndex(['a', nan, 'c', nan], categories=['a', 'b', 'c', 'd'], ordered=False, dtype='category') ``` This makes that call raise instead. Index.where is only used in one place in reshape.merge. Might be worth deprecating+privatizing.
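For reference, a sketch of the behavior after this change, mirroring the new `test_where_non_categories` (assumes a build that includes this patch):

```
import numpy as np
import pandas as pd

ci = pd.CategoricalIndex(["a", "b", "c", "d"])
mask = np.array([True, False, True, False])

try:
    ci.where(mask, 2)  # 2 is not among the categories
except ValueError as err:
    print(err)  # Cannot setitem on a Categorical with a new category...
```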
https://api.github.com/repos/pandas-dev/pandas/pulls/37977
2020-11-20T15:57:12Z
2020-11-22T01:19:52Z
2020-11-22T01:19:52Z
2020-11-22T01:45:54Z
TST: add messages to bare pytest raises in pandas/tests/io/pytables/test_timezones.py
diff --git a/pandas/tests/io/pytables/test_timezones.py b/pandas/tests/io/pytables/test_timezones.py index 8c8de77990a52..98a2b18d59b09 100644 --- a/pandas/tests/io/pytables/test_timezones.py +++ b/pandas/tests/io/pytables/test_timezones.py @@ -82,7 +82,13 @@ def test_append_with_timezones_dateutil(setup_path): ), index=range(5), ) - with pytest.raises(ValueError): + + msg = ( + r"invalid info for \[values_block_1\] for \[tz\], " + r"existing_value \[dateutil/.*US/Eastern\] " + r"conflicts with new value \[dateutil/.*EET\]" + ) + with pytest.raises(ValueError, match=msg): store.append("df_tz", df) # this is ok @@ -100,7 +106,13 @@ def test_append_with_timezones_dateutil(setup_path): ), index=range(5), ) - with pytest.raises(ValueError): + + msg = ( + r"invalid info for \[B\] for \[tz\], " + r"existing_value \[dateutil/.*EET\] " + r"conflicts with new value \[dateutil/.*CET\]" + ) + with pytest.raises(ValueError, match=msg): store.append("df_tz", df) # as index @@ -169,7 +181,12 @@ def test_append_with_timezones_pytz(setup_path): ), index=range(5), ) - with pytest.raises(ValueError): + + msg = ( + r"invalid info for \[values_block_1\] for \[tz\], " + r"existing_value \[US/Eastern\] conflicts with new value \[EET\]" + ) + with pytest.raises(ValueError, match=msg): store.append("df_tz", df) # this is ok @@ -187,7 +204,12 @@ def test_append_with_timezones_pytz(setup_path): ), index=range(5), ) - with pytest.raises(ValueError): + + msg = ( + r"invalid info for \[B\] for \[tz\], " + r"existing_value \[EET\] conflicts with new value \[CET\]" + ) + with pytest.raises(ValueError, match=msg): store.append("df_tz", df) # as index
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry This PR adds `match` messages to the bare `pytest.raises` calls in pandas/tests/io/pytables/test_timezones.py, as part of https://github.com/pandas-dev/pandas/issues/30999.
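The pattern applied throughout the diff: `pytest.raises` takes a `match` regex (checked with `re.search`), so the square brackets in the PyTables messages must be escaped. A minimal sketch of the idiom:

```
import pytest


def fail():
    raise ValueError("invalid info for [values_block_1] for [tz]")


# Brackets are regex character classes, so escape them in the pattern.
msg = r"invalid info for \[values_block_1\] for \[tz\]"
with pytest.raises(ValueError, match=msg):
    fail()
```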
https://api.github.com/repos/pandas-dev/pandas/pulls/37975
2020-11-20T09:43:41Z
2020-11-20T17:46:59Z
2020-11-20T17:46:59Z
2020-11-20T17:47:03Z
BUG: fix astype conversion string -> float
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 766c418741ada..ffd20df85ed1f 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -580,6 +580,7 @@ Conversion ^^^^^^^^^^ - Bug in :meth:`DataFrame.to_dict` with ``orient='records'`` now returns python native datetime objects for datetimelike columns (:issue:`21256`) +- Bug in :meth:`Series.astype` conversion from ``string`` to ``float`` raised in presence of ``pd.NA`` values (:issue:`37626`) - Strings diff --git a/pandas/conftest.py b/pandas/conftest.py index 77e9af67590a6..a2c137a1e1aed 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -286,7 +286,6 @@ def unique_nulls_fixture(request): # Generate cartesian product of unique_nulls_fixture: unique_nulls_fixture2 = unique_nulls_fixture - # ---------------------------------------------------------------- # Classes # ---------------------------------------------------------------- @@ -1069,6 +1068,20 @@ def float_ea_dtype(request): return request.param +@pytest.fixture(params=tm.FLOAT_DTYPES + tm.FLOAT_EA_DTYPES) +def any_float_allowed_nullable_dtype(request): + """ + Parameterized fixture for float dtypes. + + * float + * 'float32' + * 'float64' + * 'Float32' + * 'Float64' + """ + return request.param + + @pytest.fixture(params=tm.COMPLEX_DTYPES) def complex_dtype(request): """ diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 3b297e7c2b13b..e0bb788d665eb 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -18,7 +18,8 @@ from pandas.core import ops from pandas.core.array_algos import masked_reductions -from pandas.core.arrays import IntegerArray, PandasArray +from pandas.core.arrays import FloatingArray, IntegerArray, PandasArray +from pandas.core.arrays.floating import FloatingDtype from pandas.core.arrays.integer import _IntegerDtype from pandas.core.construction import extract_array from pandas.core.indexers import check_array_indexer @@ -298,6 +299,19 @@ def astype(self, dtype, copy=True): arr[mask] = 0 values = arr.astype(dtype.numpy_dtype) return IntegerArray(values, mask, copy=False) + elif isinstance(dtype, FloatingDtype): + arr = self.copy() + mask = self.isna() + arr[mask] = "0" + values = arr.astype(dtype.numpy_dtype) + return FloatingArray(values, mask, copy=False) + elif np.issubdtype(dtype, np.floating): + arr = self._ndarray.copy() + mask = self.isna() + arr[mask] = 0 + values = arr.astype(dtype) + values[mask] = np.nan + return values return super().astype(dtype, copy) diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index 07e9484994c26..b629e2fca2feb 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -366,6 +366,15 @@ def test_astype_int(dtype, request): tm.assert_extension_array_equal(result, expected) +def test_astype_float(any_float_allowed_nullable_dtype): + # Don't compare arrays (37974) + ser = pd.Series(["1.1", pd.NA, "3.3"], dtype="string") + + result = ser.astype(any_float_allowed_nullable_dtype) + expected = pd.Series([1.1, np.nan, 3.3], dtype=any_float_allowed_nullable_dtype) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("skipna", [True, False]) @pytest.mark.xfail(reason="Not implemented StringArray.sum") def test_reduce(skipna, dtype):
- [x] Is a step towards closing #37626 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Fixes the `string` -> `float` conversion, as discussed in #37626, in the presence of `pd.NA`. I used the same approach as for `string` -> `Int64`. Note that, unlike the `Int64` case, we return a `numpy.ndarray`, not a `pd.array`. I added a test that compares `pd.Series`; replacing it with `pd.array` would make the `tm.assert_numpy_array_equal` assertion raise, since `expected` is of type `FloatingArray`. IIUC `FloatingArray` is new in pandas 1.2.0. Should we return it from `.astype("float")` by default?
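A quick interactive check of the behavior this adds, mirroring the new test (assumes a pandas build with this change):

```
import pandas as pd

ser = pd.Series(["1.1", pd.NA, "3.3"], dtype="string")

print(ser.astype("Float64").dtype)   # Float64 -> nullable FloatingArray, keeps <NA>
print(ser.astype("float64").dtype)   # float64 -> plain ndarray-backed, NA becomes NaN
```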
https://api.github.com/repos/pandas-dev/pandas/pulls/37974
2020-11-20T07:36:46Z
2020-11-29T16:06:28Z
2020-11-29T16:06:28Z
2020-11-29T19:04:23Z
BLD: set inplace in setup.cfg
diff --git a/Dockerfile b/Dockerfile index b8aff5d671dcf..5d7a2b9e6b743 100644 --- a/Dockerfile +++ b/Dockerfile @@ -43,5 +43,5 @@ RUN conda env update -n base -f "$pandas_home/environment.yml" # Build C extensions and pandas RUN cd "$pandas_home" \ - && python setup.py build_ext --inplace -j 4 \ + && python setup.py build_ext -j 4 \ && python -m pip install -e . diff --git a/Makefile b/Makefile index 4f71df51de360..2c968234749f5 100644 --- a/Makefile +++ b/Makefile @@ -9,7 +9,7 @@ clean_pyc: -find . -name '*.py[co]' -exec rm {} \; build: clean_pyc - python setup.py build_ext --inplace + python setup.py build_ext lint-diff: git diff upstream/master --name-only -- "*.py" | xargs flake8 diff --git a/azure-pipelines.yml b/azure-pipelines.yml index b1091ea7f60e4..c49742095e1d8 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -40,7 +40,7 @@ jobs: . ~/virtualenvs/pandas-dev/bin/activate && \ python -m pip install --no-deps -U pip wheel setuptools && \ pip install cython numpy python-dateutil pytz pytest pytest-xdist hypothesis pytest-azurepipelines && \ - python setup.py build_ext -q -i -j2 && \ + python setup.py build_ext -q -j2 && \ python -m pip install --no-build-isolation -e . && \ pytest -m 'not slow and not network and not clipboard' pandas --junitxml=test-data.xml" displayName: 'Run 32-bit manylinux2014 Docker Build / Tests' diff --git a/ci/azure/windows.yml b/ci/azure/windows.yml index 601a834d6306a..e510f4115b25f 100644 --- a/ci/azure/windows.yml +++ b/ci/azure/windows.yml @@ -34,7 +34,7 @@ jobs: - bash: | source activate pandas-dev conda list - python setup.py build_ext -q -i -j 4 + python setup.py build_ext -q -j 4 python -m pip install --no-build-isolation -e . displayName: 'Build' diff --git a/ci/setup_env.sh b/ci/setup_env.sh index 8984fa2d9a9be..78951c9def7cb 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -131,7 +131,7 @@ conda list pandas # Make sure any error below is reported as such echo "[Build extensions]" -python setup.py build_ext -q -i -j2 +python setup.py build_ext -q -j2 echo "[Updating pip]" python -m pip install --no-deps -U pip wheel setuptools diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index 41b2b7405fcb5..ced0554c51fdf 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -183,7 +183,7 @@ See https://www.jetbrains.com/help/pycharm/docker.html for details. Note that you might need to rebuild the C extensions if/when you merge with upstream/master using:: - python setup.py build_ext --inplace -j 4 + python setup.py build_ext -j 4 .. _contributing.dev_c: @@ -268,7 +268,7 @@ We'll now kick off a three-step process: source activate pandas-dev # Build and install pandas - python setup.py build_ext --inplace -j 4 + python setup.py build_ext -j 4 python -m pip install -e . --no-build-isolation --no-use-pep517 At this point you should be able to import pandas from your locally built version:: @@ -315,7 +315,7 @@ You'll need to have at least Python 3.6.1 installed on your system. python -m pip install -r requirements-dev.txt # Build and install pandas - python setup.py build_ext --inplace -j 4 + python setup.py build_ext -j 4 python -m pip install -e . --no-build-isolation --no-use-pep517 **Unix**/**macOS with pyenv** @@ -339,7 +339,7 @@ Consult the docs for setting up pyenv `here <https://github.com/pyenv/pyenv>`__. 
python -m pip install -r requirements-dev.txt # Build and install pandas - python setup.py build_ext --inplace -j 4 + python setup.py build_ext -j 4 python -m pip install -e . --no-build-isolation --no-use-pep517 **Windows** @@ -365,7 +365,7 @@ should already exist. python -m pip install -r requirements-dev.txt # Build and install pandas - python setup.py build_ext --inplace -j 4 + python setup.py build_ext -j 4 python -m pip install -e . --no-build-isolation --no-use-pep517 Creating a branch diff --git a/pandas/__init__.py b/pandas/__init__.py index cf7ae2505b72d..b9b7d5d064855 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -33,7 +33,7 @@ raise ImportError( f"C extension: {module} not built. If you want to import " "pandas from the source directory, you may need to run " - "'python setup.py build_ext --inplace --force' to build the C extensions first." + "'python setup.py build_ext --force' to build the C extensions first." ) from e from pandas._config import ( diff --git a/setup.cfg b/setup.cfg index c83a83d599f6c..10c7137dc2f86 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,4 +1,7 @@ +[build_ext] +inplace = 1 + # See the docstring in versioneer.py for instructions. Note that you must # re-run 'versioneer.py setup' after changing this section, and commit the # resulting files.
Specify `inplace` for `build_ext` in the setup.cfg file.
https://api.github.com/repos/pandas-dev/pandas/pulls/37973
2020-11-20T04:42:33Z
2020-11-20T22:46:27Z
2020-11-20T22:46:27Z
2020-12-05T18:53:25Z
TST: add nullable array frame constructor dtype tests
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 951a462bad3e3..27c12aa4fb3d1 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1004,6 +1004,21 @@ def test_constructor_dtype(self, data, index, columns, dtype, expected): df = DataFrame(data, index, columns, dtype) assert df.values.dtype == expected + @pytest.mark.parametrize( + "data,input_dtype,expected_dtype", + ( + ([True, False, None], "boolean", pd.BooleanDtype), + ([1.0, 2.0, None], "Float64", pd.Float64Dtype), + ([1, 2, None], "Int64", pd.Int64Dtype), + (["a", "b", "c"], "string", pd.StringDtype), + ), + ) + def test_constructor_dtype_nullable_extension_arrays( + self, data, input_dtype, expected_dtype + ): + df = DataFrame({"a": data}, dtype=input_dtype) + assert df["a"].dtype == expected_dtype() + def test_constructor_scalar_inference(self): data = {"int": 1, "bool": True, "float": 3.0, "complex": 4j, "object": "foo"} df = DataFrame(data, index=np.arange(10))
- [x] closes #28424 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/37972
2020-11-20T04:04:52Z
2020-11-21T22:21:47Z
2020-11-21T22:21:46Z
2020-11-21T22:44:09Z
PERF: IntervalArray.argsort
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index d007bb112c86c..500e96a0c2784 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -619,6 +619,24 @@ def __lt__(self, other): def __le__(self, other): return self._cmp_method(other, operator.le) + def argsort( + self, + ascending: bool = True, + kind: str = "quicksort", + na_position: str = "last", + *args, + **kwargs, + ) -> np.ndarray: + ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs) + + if ascending and kind == "quicksort" and na_position == "last": + return np.lexsort((self.right, self.left)) + + # TODO: other cases we can use lexsort for? much more performant. + return super().argsort( + ascending=ascending, kind=kind, na_position=na_position, **kwargs + ) + def fillna(self, value=None, method=None, limit=None): """ Fill NA/NaN values using the specified method. diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index b0f8be986fe5d..de0ad03ee7105 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -957,11 +957,6 @@ def _format_space(self) -> str: space = " " * (len(type(self).__name__) + 1) return f"\n{space}" - # -------------------------------------------------------------------- - - def argsort(self, *args, **kwargs) -> np.ndarray: - return np.lexsort((self.right, self.left)) - # -------------------------------------------------------------------- # Set Operations
IntervalIndex.argsort has a more performant implementation (at least for default kwargs). This just moves that up to IntervalArray. ``` In [2]: idx = pd.IntervalIndex.from_breaks(range(10**4)) In [3]: arr = idx._data In [4]: %timeit arr.argsort() 24.5 ms ± 458 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) # <-- master 79 µs ± 1.64 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each) # <-- PR ```
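Why `np.lexsort((self.right, self.left))` gives the right ascending order: `lexsort` treats the *last* key as the primary one, so intervals are ordered by `left` with ties broken by `right`. A small standalone check:

```
import numpy as np

left = np.array([0, 0, 1])
right = np.array([2, 1, 2])

# The last array passed is the primary key: sort by left, then right.
order = np.lexsort((right, left))
print(order)  # [1 0 2]  -> (0, 1], (0, 2], (1, 2]
```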
https://api.github.com/repos/pandas-dev/pandas/pulls/37971
2020-11-20T03:25:43Z
2020-11-21T21:45:58Z
2020-11-21T21:45:58Z
2020-11-21T22:00:25Z
REF: share more methods in ExtensionIndex
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 9ed977ad1e52e..413c8f6b45275 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -6,7 +6,6 @@ from pandas._config import get_option from pandas._libs import index as libindex -from pandas._libs.hashtable import duplicated_int64 from pandas._libs.lib import no_default from pandas._typing import ArrayLike, Label from pandas.util._decorators import Appender, cache_readonly, doc @@ -14,7 +13,6 @@ from pandas.core.dtypes.common import ( ensure_platform_int, is_categorical_dtype, - is_list_like, is_scalar, ) from pandas.core.dtypes.dtypes import CategoricalDtype @@ -226,9 +224,14 @@ def _simple_new(cls, values: Categorical, name: Label = None): # -------------------------------------------------------------------- + # error: Argument 1 of "_shallow_copy" is incompatible with supertype + # "ExtensionIndex"; supertype defines the argument type as + # "Optional[ExtensionArray]" [override] @doc(Index._shallow_copy) - def _shallow_copy( - self, values: Optional[Categorical] = None, name: Label = no_default + def _shallow_copy( # type:ignore[override] + self, + values: Optional[Categorical] = None, + name: Label = no_default, ): name = self.name if name is no_default else name @@ -247,6 +250,10 @@ def _is_dtype_compat(self, other) -> Categorical: provide a comparison between the dtype of self and other (coercing if needed) + Parameters + ---------- + other : Index + Returns ------- Categorical @@ -263,8 +270,6 @@ def _is_dtype_compat(self, other) -> Categorical: ) else: values = other - if not is_list_like(values): - values = [values] cat = Categorical(other, dtype=self.dtype) other = CategoricalIndex(cat) @@ -358,11 +363,6 @@ def values(self): """ return the underlying data, which is a Categorical """ return self._data - @property - def _has_complex_internals(self) -> bool: - # used to avoid libreduction code paths, which raise or require conversion - return True - @doc(Index.__contains__) def __contains__(self, key: Any) -> bool: # if key is a NaN, check if any NaN is in self. @@ -399,11 +399,6 @@ def unique(self, level=None): # of result, not self. return type(self)._simple_new(result, name=self.name) - @doc(Index.duplicated) - def duplicated(self, keep="first"): - codes = self.codes.astype("i8") - return duplicated_int64(codes, keep) - def _to_safe_for_reshape(self): """ convert to object if we are a categorical """ return self.astype("object") @@ -482,7 +477,7 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None): new_target = np.asarray(new_target) if is_categorical_dtype(target): new_target = Categorical(new_target, dtype=target.dtype) - new_target = target._shallow_copy(new_target, name=self.name) + new_target = type(self)._simple_new(new_target, name=self.name) else: new_target = Index(new_target, name=self.name) @@ -506,7 +501,7 @@ def _reindex_non_unique(self, target): # .reindex returns normal Index. 
Revert to CategoricalIndex if # all targets are included in my categories new_target = Categorical(new_target, dtype=self.dtype) - new_target = self._shallow_copy(new_target) + new_target = type(self)._simple_new(new_target, name=self.name) return new_target, indexer, new_indexer diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index f80254b91231a..f0b37b810b28a 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -10,7 +10,6 @@ from pandas._libs.tslibs import BaseOffset, Resolution, Tick from pandas._typing import Callable, Label from pandas.compat.numpy import function as nv -from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender, cache_readonly, doc from pandas.core.dtypes.common import ( @@ -124,16 +123,6 @@ def _simple_new( def _is_all_dates(self) -> bool: return True - def _shallow_copy(self, values=None, name: Label = lib.no_default): - name = self.name if name is lib.no_default else name - - if values is not None: - return self._simple_new(values, name=name) - - result = self._simple_new(self._data, name=name) - result._cache = self._cache - return result - # ------------------------------------------------------------------------ # Abstract data attributes @@ -399,7 +388,7 @@ def _format_with_header( @property def _formatter_func(self): - raise AbstractMethodError(self) + return self._data._formatter() def _format_attrs(self): """ @@ -692,6 +681,11 @@ def _with_freq(self, freq): arr = self._data._with_freq(freq) return type(self)._simple_new(arr, name=self.name) + @property + def _has_complex_internals(self) -> bool: + # used to avoid libreduction code paths, which raise or require conversion + return False + # -------------------------------------------------------------------- # Set Operation Methods diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index e262d33e1aaf0..4bafda9c0a611 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -20,11 +20,8 @@ from pandas.core.dtypes.common import ( DT64NS_DTYPE, - is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64tz_dtype, - is_float, - is_integer, is_scalar, ) from pandas.core.dtypes.missing import is_valid_nat_for_dtype @@ -354,8 +351,6 @@ def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: """ Can we compare values of the given dtype to our own? 
""" - if not is_datetime64_any_dtype(dtype): - return False if self.tz is not None: # If we have tz, we can compare to tzaware return is_datetime64tz_dtype(dtype) @@ -720,9 +715,6 @@ def _maybe_cast_slice_bound(self, label, side: str, kind): """ assert kind in ["loc", "getitem", None] - if is_float(label) or isinstance(label, time) or is_integer(label): - self._invalid_indexer("slice", label) - if isinstance(label, str): freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None)) parsed, reso = parsing.parse_time_string(label, freq) @@ -739,6 +731,9 @@ def _maybe_cast_slice_bound(self, label, side: str, kind): return lower if side == "left" else upper elif isinstance(label, (self._data._recognized_scalars, date)): self._deprecate_mismatched_indexing(label) + else: + self._invalid_indexer("slice", label) + return self._maybe_cast_for_get_loc(label) def _get_string_slice(self, key: str): diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index 5db84a5d0a50a..0aa4b7732c048 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -1,10 +1,12 @@ """ Shared methods for Index subclasses backed by ExtensionArray. """ -from typing import List, TypeVar +from typing import List, Optional, TypeVar import numpy as np +from pandas._libs import lib +from pandas._typing import Label from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly, doc @@ -211,6 +213,24 @@ class ExtensionIndex(Index): __le__ = _make_wrapped_comparison_op("__le__") __ge__ = _make_wrapped_comparison_op("__ge__") + @doc(Index._shallow_copy) + def _shallow_copy( + self, values: Optional[ExtensionArray] = None, name: Label = lib.no_default + ): + name = self.name if name is lib.no_default else name + + if values is not None: + return self._simple_new(values, name=name) + + result = self._simple_new(self._data, name=name) + result._cache = self._cache + return result + + @property + def _has_complex_internals(self) -> bool: + # used to avoid libreduction code paths, which raise or require conversion + return True + # --------------------------------------------------------------------- # NDarray-Like Methods @@ -251,7 +271,7 @@ def _get_engine_target(self) -> np.ndarray: def repeat(self, repeats, axis=None): nv.validate_repeat(tuple(), dict(axis=axis)) result = self._data.repeat(repeats, axis=axis) - return self._shallow_copy(result) + return type(self)._simple_new(result, name=self.name) def insert(self, loc: int, item): # ExtensionIndex subclasses must override Index.insert diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 3c96685638ee8..b1b3c594512b1 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -320,19 +320,6 @@ def from_tuples( # -------------------------------------------------------------------- - @Appender(Index._shallow_copy.__doc__) - def _shallow_copy( - self, values: Optional[IntervalArray] = None, name: Label = lib.no_default - ): - name = self.name if name is lib.no_default else name - - if values is not None: - return self._simple_new(values, name=name) - - result = self._simple_new(self._data, name=name) - result._cache = self._cache - return result - @cache_readonly def _engine(self): left = self._maybe_convert_i8(self.left) @@ -373,11 +360,6 @@ def values(self) -> IntervalArray: """ return self._data - @property - def _has_complex_internals(self) -> bool: - # used to avoid libreduction code 
paths, which raise or require conversion - return True - def __array_wrap__(self, result, context=None): # we don't want the superclass implementation return result @@ -893,7 +875,7 @@ def delete(self, loc): new_left = self.left.delete(loc) new_right = self.right.delete(loc) result = IntervalArray.from_arrays(new_left, new_right, closed=self.closed) - return self._shallow_copy(result) + return type(self)._simple_new(result, name=self.name) def insert(self, loc, item): """ @@ -915,7 +897,7 @@ def insert(self, loc, item): new_left = self.left.insert(loc, left_insert) new_right = self.right.insert(loc, right_insert) result = IntervalArray.from_arrays(new_left, new_right, closed=self.closed) - return self._shallow_copy(result) + return type(self)._simple_new(result, name=self.name) # -------------------------------------------------------------------- # Rendering Methods diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 6e18b29673ca0..9eb34d920a328 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -3629,7 +3629,7 @@ def astype(self, dtype, copy=True): return self._shallow_copy() return self - def _validate_insert_value(self, item): + def _validate_fill_value(self, item): if not isinstance(item, tuple): # Pad the key with empty strings if lower levels of the key # aren't specified: @@ -3652,7 +3652,7 @@ def insert(self, loc: int, item): ------- new_index : Index """ - item = self._validate_insert_value(item) + item = self._validate_fill_value(item) new_levels = [] new_codes = [] diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 0b0f985697da9..38abc18b5f1cb 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -251,11 +251,6 @@ def __new__( def values(self) -> np.ndarray: return np.asarray(self) - @property - def _has_complex_internals(self) -> bool: - # used to avoid libreduction code paths, which raise or require conversion - return True - def _maybe_convert_timedelta(self, other): """ Convert timedelta-like input to an integer multiple of self.freq @@ -307,10 +302,6 @@ def _mpl_repr(self): # how to represent ourselves to matplotlib return self.astype(object)._values - @property - def _formatter_func(self): - return self._data._formatter(boxed=False) - # ------------------------------------------------------------------------ # Indexing diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 8ce04b107d23b..6ae10ad2f5da2 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -157,13 +157,6 @@ def __new__( ) return cls._simple_new(tdarr, name=name) - # ------------------------------------------------------------------- - # Rendering Methods - - @property - def _formatter_func(self): - return self._data._formatter() - # ------------------------------------------------------------------- @doc(Index.astype)
https://api.github.com/repos/pandas-dev/pandas/pulls/37970
2020-11-20T01:55:54Z
2020-11-20T16:18:40Z
2020-11-20T16:18:40Z
2020-11-20T16:20:05Z
BUG: IntervalIndex.putmask with datetimelike dtypes
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index a3b5ba616b258..ff4335e921e3d 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -587,7 +587,7 @@ Interval - Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` where :class:`Interval` dtypes would be converted to object dtypes (:issue:`34871`) - Bug in :meth:`IntervalIndex.take` with negative indices and ``fill_value=None`` (:issue:`37330`) -- +- Bug in :meth:`IntervalIndex.putmask` with datetime-like dtype incorrectly casting to object dtype (:issue:`37968`) - Indexing diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 07862e0b9bb48..b40531bd42af8 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -300,3 +300,24 @@ def __repr__(self) -> str: data = ",\n".join(lines) class_name = f"<{type(self).__name__}>" return f"{class_name}\n[\n{data}\n]\nShape: {self.shape}, dtype: {self.dtype}" + + # ------------------------------------------------------------------------ + # __array_function__ methods + + def putmask(self, mask, value): + """ + Analogue to np.putmask(self, mask, value) + + Parameters + ---------- + mask : np.ndarray[bool] + value : scalar or listlike + + Raises + ------ + TypeError + If value cannot be cast to self.dtype. + """ + value = self._validate_setitem_value(value) + + np.putmask(self._ndarray, mask, value) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 5209d83ade309..5eb890c9817c0 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4344,11 +4344,9 @@ def putmask(self, mask, value): numpy.ndarray.putmask : Changes elements of an array based on conditional and input values. """ - values = self.values.copy() + values = self._values.copy() try: converted = self._validate_fill_value(value) - np.putmask(values, mask, converted) - return self._shallow_copy(values) except (ValueError, TypeError) as err: if is_object_dtype(self): raise err @@ -4356,6 +4354,9 @@ def putmask(self, mask, value): # coerces to object return self.astype(object).putmask(mask, value) + np.putmask(values, mask, converted) + return self._shallow_copy(values) + def equals(self, other: object) -> bool: """ Determine if two Index object are equal. 
diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index c117c32f26d25..5db84a5d0a50a 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -359,15 +359,13 @@ def insert(self, loc: int, item): return type(self)._simple_new(new_arr, name=self.name) def putmask(self, mask, value): + res_values = self._data.copy() try: - value = self._data._validate_setitem_value(value) + res_values.putmask(mask, value) except (TypeError, ValueError): return self.astype(object).putmask(mask, value) - new_values = self._data._ndarray.copy() - np.putmask(new_values, mask, value) - new_arr = self._data._from_backing_data(new_values) - return type(self)._simple_new(new_arr, name=self.name) + return type(self)._simple_new(res_values, name=self.name) def _wrap_joined_index(self: _T, joined: np.ndarray, other: _T) -> _T: name = get_op_result_name(self, other) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index b0f8be986fe5d..3c96685638ee8 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -858,6 +858,22 @@ def mid(self): def length(self): return Index(self._data.length, copy=False) + def putmask(self, mask, value): + arr = self._data.copy() + try: + value_left, value_right = arr._validate_setitem_value(value) + except (ValueError, TypeError): + return self.astype(object).putmask(mask, value) + + if isinstance(self._data._left, np.ndarray): + np.putmask(arr._left, mask, value_left) + np.putmask(arr._right, mask, value_right) + else: + # TODO: special case not needed with __array_function__ + arr._left.putmask(mask, value_left) + arr._right.putmask(mask, value_right) + return type(self)._simple_new(arr, name=self.name) + @Appender(Index.where.__doc__) def where(self, cond, other=None): if other is None: diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 9dacdd6dea9ca..24aaf5885fe0e 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -122,7 +122,7 @@ def _validate_fill_value(self, value): # force conversion to object # so we don't lose the bools raise TypeError - if isinstance(value, str): + elif isinstance(value, str) or lib.is_complex(value): raise TypeError return value diff --git a/pandas/tests/indexes/interval/test_base.py b/pandas/tests/indexes/interval/test_base.py index c316655fbda8a..343c3d2e145f6 100644 --- a/pandas/tests/indexes/interval/test_base.py +++ b/pandas/tests/indexes/interval/test_base.py @@ -80,6 +80,30 @@ def test_where(self, closed, klass): result = idx.where(klass(cond)) tm.assert_index_equal(result, expected) + @pytest.mark.parametrize("tz", ["US/Pacific", None]) + def test_putmask_dt64(self, tz): + # GH#37968 + dti = date_range("2016-01-01", periods=9, tz=tz) + idx = IntervalIndex.from_breaks(dti) + mask = np.zeros(idx.shape, dtype=bool) + mask[0:3] = True + + result = idx.putmask(mask, idx[-1]) + expected = IntervalIndex([idx[-1]] * 3 + list(idx[3:])) + tm.assert_index_equal(result, expected) + + def test_putmask_td64(self): + # GH#37968 + dti = date_range("2016-01-01", periods=9) + tdi = dti - dti[0] + idx = IntervalIndex.from_breaks(tdi) + mask = np.zeros(idx.shape, dtype=bool) + mask[0:3] = True + + result = idx.putmask(mask, idx[-1]) + expected = IntervalIndex([idx[-1]] * 3 + list(idx[3:])) + tm.assert_index_equal(result, expected) + def test_getitem_2d_deprecated(self): # GH#30588 multi-dim indexing is deprecated, but raising is also acceptable idx = self.create_index()
- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

Implements `NDArrayBackedExtensionArray.putmask`, which will hopefully be rolled into `__array_function__` to simplify a bunch of code.
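A minimal sketch of the fixed behavior, adapted from the new tests in this diff (the exact dtype repr may vary between pandas versions):

```python
import numpy as np
import pandas as pd

dti = pd.date_range("2016-01-01", periods=9)
idx = pd.IntervalIndex.from_breaks(dti)

mask = np.zeros(idx.shape, dtype=bool)
mask[0:3] = True

# Previously putmask fell back to casting the index to object dtype;
# with this patch the datetime64-backed interval dtype is preserved.
result = idx.putmask(mask, idx[-1])
print(result.dtype)
```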
https://api.github.com/repos/pandas-dev/pandas/pulls/37968
2020-11-20T01:42:56Z
2020-11-20T03:09:18Z
2020-11-20T03:09:18Z
2020-11-20T15:40:53Z
Read csv headers
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index 965833c013c03..b04abf512fbeb 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -1627,6 +1627,20 @@ functions - the following example shows reading a CSV file: df = pd.read_csv("https://download.bls.gov/pub/time.series/cu/cu.item", sep="\t") +.. versionadded:: 1.3.0 + +A custom header can be sent alongside HTTP(s) requests by passing a dictionary +of header key value mappings to the ``storage_options`` keyword argument as shown below: + +.. code-block:: python + + headers = {"User-Agent": "pandas"} + df = pd.read_csv( + "https://download.bls.gov/pub/time.series/cu/cu.item", + sep="\t", + storage_options=headers + ) + All URLs which are not local files or HTTP(s) are handled by `fsspec`_, if installed, and its various filesystem implementations (including Amazon S3, Google Cloud, SSH, FTP, webHDFS...). diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 26e548f519ecd..188ef83244be8 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -13,6 +13,26 @@ including other versions of pandas. Enhancements ~~~~~~~~~~~~ +.. _whatsnew_130.read_csv_json_http_headers: + +Custom HTTP(s) headers when reading csv or json files +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +When reading from a remote URL that is not handled by fsspec (ie. HTTP and +HTTPS) the dictionary passed to ``storage_options`` will be used to create the +headers included in the request. This can be used to control the User-Agent +header or send other custom headers (:issue:`36688`). +For example: + +.. ipython:: python + + headers = {"User-Agent": "pandas"} + df = pd.read_csv( + "https://download.bls.gov/pub/time.series/cu/cu.item", + sep="\t", + storage_options=headers + ) + .. _whatsnew_130.enhancements.other: diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py index 3aeb3b664b27f..6d3249802ee5e 100644 --- a/pandas/core/shared_docs.py +++ b/pandas/core/shared_docs.py @@ -383,8 +383,7 @@ "storage_options" ] = """storage_options : dict, optional Extra options that make sense for a particular storage connection, e.g. - host, port, username, password, etc., if using a URL that will - be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error - will be raised if providing this argument with a non-fsspec URL. - See the fsspec and backend storage implementation docs for the set of - allowed keys and values.""" + host, port, username, password, etc. For HTTP(S) URLs the key-value pairs + are forwarded to ``urllib`` as header options. For other URLs (e.g. + starting with "s3://", and "gcs://") the key-value pairs are forwarded to + ``fsspec``. Please see ``fsspec`` and ``urllib`` for more details.""" diff --git a/pandas/io/common.py b/pandas/io/common.py index 9fede5180e727..250c9422213e7 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -276,12 +276,18 @@ def _get_filepath_or_buffer( fsspec_mode += "b" if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer): - # TODO: fsspec can also handle HTTP via requests, but leaving this unchanged - if storage_options: - raise ValueError( - "storage_options passed with file object or non-fsspec file path" - ) - req = urlopen(filepath_or_buffer) + # TODO: fsspec can also handle HTTP via requests, but leaving this + # unchanged. 
using fsspec appears to break the ability to infer if the + # server responded with gzipped data + storage_options = storage_options or {} + + # waiting until now for importing to match intended lazy logic of + # urlopen function defined elsewhere in this module + import urllib.request + + # assuming storage_options is to be interpretted as headers + req_info = urllib.request.Request(filepath_or_buffer, headers=storage_options) + req = urlopen(req_info) content_encoding = req.headers.get("Content-Encoding", None) if content_encoding == "gzip": # Override compression based on Content-Encoding header diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 8b1184df92eaf..44b58f244a2ad 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -14,7 +14,13 @@ from pandas import DataFrame, MultiIndex, get_option from pandas.core import generic -from pandas.io.common import IOHandles, get_handle, is_fsspec_url, stringify_path +from pandas.io.common import ( + IOHandles, + get_handle, + is_fsspec_url, + is_url, + stringify_path, +) def get_engine(engine: str) -> "BaseImpl": @@ -66,8 +72,10 @@ def _get_path_or_handle( fs, path_or_handle = fsspec.core.url_to_fs( path_or_handle, **(storage_options or {}) ) - elif storage_options: - raise ValueError("storage_options passed with buffer or non-fsspec filepath") + elif storage_options and (not is_url(path_or_handle) or mode != "rb"): + # can't write to a remote url + # without making use of fsspec at the moment + raise ValueError("storage_options passed with buffer, or non-supported URL") handles = None if ( @@ -79,7 +87,9 @@ def _get_path_or_handle( # use get_handle only when we are very certain that it is not a directory # fsspec resources can also point to directories # this branch is used for example when reading from non-fsspec URLs - handles = get_handle(path_or_handle, mode, is_text=False) + handles = get_handle( + path_or_handle, mode, is_text=False, storage_options=storage_options + ) fs = None path_or_handle = handles.handle return path_or_handle, handles, fs @@ -307,7 +317,9 @@ def read( # use get_handle only when we are very certain that it is not a directory # fsspec resources can also point to directories # this branch is used for example when reading from non-fsspec URLs - handles = get_handle(path, "rb", is_text=False) + handles = get_handle( + path, "rb", is_text=False, storage_options=storage_options + ) path = handles.handle parquet_file = self.api.ParquetFile(path, **parquet_kwargs) @@ -404,10 +416,12 @@ def to_parquet( return None +@doc(storage_options=generic._shared_docs["storage_options"]) def read_parquet( path, engine: str = "auto", columns=None, + storage_options: StorageOptions = None, use_nullable_dtypes: bool = False, **kwargs, ): @@ -432,13 +446,18 @@ def read_parquet( By file-like object, we refer to objects with a ``read()`` method, such as a file handle (e.g. via builtin ``open`` function) or ``StringIO``. - engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' + engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. columns : list, default=None If not None, only these columns will be read from the file. + + {storage_options} + + .. 
versionadded:: 1.3.0 + use_nullable_dtypes : bool, default False If True, use dtypes that use ``pd.NA`` as missing value indicator for the resulting DataFrame (only applicable for ``engine="pyarrow"``). @@ -448,6 +467,7 @@ def read_parquet( support dtypes) may change without notice. .. versionadded:: 1.2.0 + **kwargs Any additional kwargs are passed to the engine. @@ -456,6 +476,11 @@ def read_parquet( DataFrame """ impl = get_engine(engine) + return impl.read( - path, columns=columns, use_nullable_dtypes=use_nullable_dtypes, **kwargs + path, + columns=columns, + storage_options=storage_options, + use_nullable_dtypes=use_nullable_dtypes, + **kwargs, ) diff --git a/pandas/tests/io/test_user_agent.py b/pandas/tests/io/test_user_agent.py new file mode 100644 index 0000000000000..8894351597903 --- /dev/null +++ b/pandas/tests/io/test_user_agent.py @@ -0,0 +1,309 @@ +""" +Tests for the pandas custom headers in http(s) requests +""" +import gzip +import http.server +from io import BytesIO +import threading + +import pytest + +import pandas as pd +import pandas._testing as tm + + +class BaseUserAgentResponder(http.server.BaseHTTPRequestHandler): + """ + Base class for setting up a server that can be set up to respond + with a particular file format with accompanying content-type headers. + The interfaces on the different io methods are different enough + that this seemed logical to do. + """ + + def start_processing_headers(self): + """ + shared logic at the start of a GET request + """ + self.send_response(200) + self.requested_from_user_agent = self.headers["User-Agent"] + response_df = pd.DataFrame( + { + "header": [self.requested_from_user_agent], + } + ) + return response_df + + def gzip_bytes(self, response_bytes): + """ + some web servers will send back gzipped files to save bandwidth + """ + bio = BytesIO() + zipper = gzip.GzipFile(fileobj=bio, mode="w") + zipper.write(response_bytes) + zipper.close() + response_bytes = bio.getvalue() + return response_bytes + + def write_back_bytes(self, response_bytes): + """ + shared logic at the end of a GET request + """ + self.wfile.write(response_bytes) + + +class CSVUserAgentResponder(BaseUserAgentResponder): + def do_GET(self): + response_df = self.start_processing_headers() + + self.send_header("Content-Type", "text/csv") + self.end_headers() + + response_bytes = response_df.to_csv(index=False).encode("utf-8") + self.write_back_bytes(response_bytes) + + +class GzippedCSVUserAgentResponder(BaseUserAgentResponder): + def do_GET(self): + response_df = self.start_processing_headers() + self.send_header("Content-Type", "text/csv") + self.send_header("Content-Encoding", "gzip") + self.end_headers() + + response_bytes = response_df.to_csv(index=False).encode("utf-8") + response_bytes = self.gzip_bytes(response_bytes) + + self.write_back_bytes(response_bytes) + + +class JSONUserAgentResponder(BaseUserAgentResponder): + def do_GET(self): + response_df = self.start_processing_headers() + self.send_header("Content-Type", "application/json") + self.end_headers() + + response_bytes = response_df.to_json().encode("utf-8") + + self.write_back_bytes(response_bytes) + + +class GzippedJSONUserAgentResponder(BaseUserAgentResponder): + def do_GET(self): + response_df = self.start_processing_headers() + self.send_header("Content-Type", "application/json") + self.send_header("Content-Encoding", "gzip") + self.end_headers() + + response_bytes = response_df.to_json().encode("utf-8") + response_bytes = self.gzip_bytes(response_bytes) + + 
self.write_back_bytes(response_bytes) + + +class ParquetPyArrowUserAgentResponder(BaseUserAgentResponder): + def do_GET(self): + response_df = self.start_processing_headers() + self.send_header("Content-Type", "application/octet-stream") + self.end_headers() + + response_bytes = response_df.to_parquet(index=False, engine="pyarrow") + + self.write_back_bytes(response_bytes) + + +class ParquetFastParquetUserAgentResponder(BaseUserAgentResponder): + def do_GET(self): + response_df = self.start_processing_headers() + self.send_header("Content-Type", "application/octet-stream") + self.end_headers() + + # the fastparquet engine doesn't like to write to a buffer + # it can do it via the open_with function being set appropriately + # however it automatically calls the close method and wipes the buffer + # so just overwrite that attribute on this instance to not do that + + # protected by an importorskip in the respective test + import fsspec + + response_df.to_parquet( + "memory://fastparquet_user_agent.parquet", + index=False, + engine="fastparquet", + compression=None, + ) + with fsspec.open("memory://fastparquet_user_agent.parquet", "rb") as f: + response_bytes = f.read() + + self.write_back_bytes(response_bytes) + + +class PickleUserAgentResponder(BaseUserAgentResponder): + def do_GET(self): + response_df = self.start_processing_headers() + self.send_header("Content-Type", "application/octet-stream") + self.end_headers() + + bio = BytesIO() + response_df.to_pickle(bio) + response_bytes = bio.getvalue() + + self.write_back_bytes(response_bytes) + + +class StataUserAgentResponder(BaseUserAgentResponder): + def do_GET(self): + response_df = self.start_processing_headers() + self.send_header("Content-Type", "application/octet-stream") + self.end_headers() + + bio = BytesIO() + response_df.to_stata(bio, write_index=False) + response_bytes = bio.getvalue() + + self.write_back_bytes(response_bytes) + + +class AllHeaderCSVResponder(http.server.BaseHTTPRequestHandler): + """ + Send all request headers back for checking round trip + """ + + def do_GET(self): + response_df = pd.DataFrame(self.headers.items()) + self.send_response(200) + self.send_header("Content-Type", "text/csv") + self.end_headers() + response_bytes = response_df.to_csv(index=False).encode("utf-8") + self.wfile.write(response_bytes) + + +@pytest.mark.parametrize( + "responder, read_method, port, parquet_engine", + [ + (CSVUserAgentResponder, pd.read_csv, 34259, None), + (JSONUserAgentResponder, pd.read_json, 34260, None), + (ParquetPyArrowUserAgentResponder, pd.read_parquet, 34268, "pyarrow"), + (ParquetFastParquetUserAgentResponder, pd.read_parquet, 34273, "fastparquet"), + (PickleUserAgentResponder, pd.read_pickle, 34271, None), + (StataUserAgentResponder, pd.read_stata, 34272, None), + (GzippedCSVUserAgentResponder, pd.read_csv, 34261, None), + (GzippedJSONUserAgentResponder, pd.read_json, 34262, None), + ], +) +def test_server_and_default_headers(responder, read_method, port, parquet_engine): + if parquet_engine is not None: + pytest.importorskip(parquet_engine) + if parquet_engine == "fastparquet": + pytest.importorskip("fsspec") + + server = http.server.HTTPServer(("localhost", port), responder) + server_thread = threading.Thread(target=server.serve_forever) + server_thread.start() + if parquet_engine is None: + df_http = read_method(f"http://localhost:{port}") + else: + df_http = read_method(f"http://localhost:{port}", engine=parquet_engine) + server.shutdown() + server.server_close() + server_thread.join() + assert not 
df_http.empty + + +@pytest.mark.parametrize( + "responder, read_method, port, parquet_engine", + [ + (CSVUserAgentResponder, pd.read_csv, 34263, None), + (JSONUserAgentResponder, pd.read_json, 34264, None), + (ParquetPyArrowUserAgentResponder, pd.read_parquet, 34270, "pyarrow"), + (ParquetFastParquetUserAgentResponder, pd.read_parquet, 34275, "fastparquet"), + (PickleUserAgentResponder, pd.read_pickle, 34273, None), + (StataUserAgentResponder, pd.read_stata, 34274, None), + (GzippedCSVUserAgentResponder, pd.read_csv, 34265, None), + (GzippedJSONUserAgentResponder, pd.read_json, 34266, None), + ], +) +def test_server_and_custom_headers(responder, read_method, port, parquet_engine): + if parquet_engine is not None: + pytest.importorskip(parquet_engine) + if parquet_engine == "fastparquet": + pytest.importorskip("fsspec") + + custom_user_agent = "Super Cool One" + df_true = pd.DataFrame({"header": [custom_user_agent]}) + server = http.server.HTTPServer(("localhost", port), responder) + server_thread = threading.Thread(target=server.serve_forever) + server_thread.start() + + if parquet_engine is None: + df_http = read_method( + f"http://localhost:{port}", + storage_options={"User-Agent": custom_user_agent}, + ) + else: + df_http = read_method( + f"http://localhost:{port}", + storage_options={"User-Agent": custom_user_agent}, + engine=parquet_engine, + ) + server.shutdown() + + server.server_close() + server_thread.join() + + tm.assert_frame_equal(df_true, df_http) + + +@pytest.mark.parametrize( + "responder, read_method, port", + [ + (AllHeaderCSVResponder, pd.read_csv, 34267), + ], +) +def test_server_and_all_custom_headers(responder, read_method, port): + custom_user_agent = "Super Cool One" + custom_auth_token = "Super Secret One" + storage_options = { + "User-Agent": custom_user_agent, + "Auth": custom_auth_token, + } + server = http.server.HTTPServer(("localhost", port), responder) + server_thread = threading.Thread(target=server.serve_forever) + server_thread.start() + + df_http = read_method( + f"http://localhost:{port}", + storage_options=storage_options, + ) + server.shutdown() + server.server_close() + server_thread.join() + + df_http = df_http[df_http["0"].isin(storage_options.keys())] + df_http = df_http.sort_values(["0"]).reset_index() + df_http = df_http[["0", "1"]] + + keys = list(storage_options.keys()) + df_true = pd.DataFrame({"0": keys, "1": [storage_options[k] for k in keys]}) + df_true = df_true.sort_values(["0"]) + df_true = df_true.reset_index().drop(["index"], axis=1) + + tm.assert_frame_equal(df_true, df_http) + + +@pytest.mark.parametrize( + "engine", + [ + "pyarrow", + "fastparquet", + ], +) +def test_to_parquet_to_disk_with_storage_options(engine): + headers = { + "User-Agent": "custom", + "Auth": "other_custom", + } + + pytest.importorskip(engine) + + true_df = pd.DataFrame({"column_name": ["column_value"]}) + with pytest.raises(ValueError): + true_df.to_parquet("/tmp/junk.parquet", storage_options=headers, engine=engine)
- [x] closes #36688
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
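A short usage sketch: for plain HTTP(S) URLs, the key-value pairs in `storage_options` are now forwarded as request headers. The endpoint here is purely illustrative; any HTTP(S) server that expects custom headers would work.

```python
import pandas as pd

# hypothetical endpoint; substitute a real HTTP(S) URL
headers = {"User-Agent": "pandas-test", "Auth": "my-token"}
df = pd.read_csv("https://example.com/data.csv", storage_options=headers)
```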
https://api.github.com/repos/pandas-dev/pandas/pulls/37966
2020-11-20T00:02:47Z
2020-12-15T00:35:00Z
2020-12-15T00:35:00Z
2021-06-07T11:55:54Z
BUG: Make DTI/TDI/PI argsort match their underlying arrays
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index e8af9da30a298..73227bb6ec159 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -374,7 +374,10 @@ def _set_grouper(self, obj: FrameOrSeries, sort: bool = False): # possibly sort if (self.sort or sort) and not ax.is_monotonic: # use stable sort to support first, last, nth - indexer = self.indexer = ax.argsort(kind="mergesort") + # TODO: why does putting na_position="first" fix datetimelike cases? + indexer = self.indexer = ax.array.argsort( + kind="mergesort", na_position="first" + ) ax = ax.take(indexer) obj = obj.take(indexer, axis=self.axis) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 40fcc824992b7..5c5fd40b39ade 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4776,10 +4776,6 @@ def argsort(self, *args, **kwargs) -> np.ndarray: >>> idx[order] Index(['a', 'b', 'c', 'd'], dtype='object') """ - if needs_i8_conversion(self.dtype): - # TODO: these do not match the underlying EA argsort methods GH#37863 - return self.asi8.argsort(*args, **kwargs) - # This works for either ndarray or EA, is overriden # by RangeIndex, MultIIndex return self._data.argsort(*args, **kwargs) diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py index 14f9c2f9de284..c128f4ab6b7dd 100644 --- a/pandas/tests/indexes/datetimelike.py +++ b/pandas/tests/indexes/datetimelike.py @@ -10,6 +10,14 @@ class DatetimeLike(Base): + def test_argsort_matches_array(self): + rng = self.create_index() + rng = rng.insert(1, pd.NaT) + + result = rng.argsort() + expected = rng._data.argsort() + tm.assert_numpy_array_equal(result, expected) + def test_can_hold_identifiers(self): idx = self.create_index() key = idx[0]
- [x] closes #37863
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry

The fix to keep resample working is a kludge I'd like to make unnecessary before merging; cc @mroeschke
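A small sketch of the mismatch being fixed, based on the new `test_argsort_matches_array` (`_data` is the private backing array, as used in the test):

```python
import pandas as pd

dti = pd.date_range("2016-01-01", periods=3).insert(1, pd.NaT)

# Index.argsort now defers to the underlying DatetimeArray, so the two
# calls agree on where NaT sorts; previously the Index sorted on asi8,
# under which NaT (stored as the minimal int64) sorted first.
print(dti.argsort())
print(dti._data.argsort())
```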
https://api.github.com/repos/pandas-dev/pandas/pulls/37965
2020-11-19T23:43:03Z
2020-12-17T13:50:58Z
2020-12-17T13:50:58Z
2020-12-17T15:43:39Z
BUG: Bug in setitem raising ValueError when setting more than one column via array
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 9bfbc22b1e628..26aa5e3a71302 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -667,6 +667,9 @@ def _ensure_listlike_indexer(self, key, axis=None, value=None): if k not in self.obj: if value is None: self.obj[k] = np.nan + elif is_array_like(value) and value.ndim == 2: + # GH#37964 have to select columnwise in case of array + self.obj[k] = value[:, i] elif is_list_like(value): self.obj[k] = value[i] else: diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index e4c57dc2b72fc..cd3102836422f 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -289,6 +289,21 @@ def test_setitem_periodindex(self): assert isinstance(rs.index, PeriodIndex) tm.assert_index_equal(rs.index, rng) + def test_setitem_complete_column_with_array(self): + # GH#37954 + df = DataFrame({"a": ["one", "two", "three"], "b": [1, 2, 3]}) + arr = np.array([[1, 1], [3, 1], [5, 1]]) + df[["c", "d"]] = arr + expected = DataFrame( + { + "a": ["one", "two", "three"], + "b": [1, 2, 3], + "c": [1, 3, 5], + "d": [1, 1, 1], + } + ) + tm.assert_frame_equal(df, expected) + class TestDataFrameSetItemSlicing: def test_setitem_slice_position(self):
- [x] xref #37954
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`

Did not add a whatsnew entry because this works on 1.1.4; it is only a regression on master, not related to any release.

The array being set must have a different number of rows than columns for the ValueError to occur; with a square array, wrong values would be set silently instead. A short reproduction is sketched below.

cc @jbrockmendel
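The reproduction, taken straight from the new test:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": ["one", "two", "three"], "b": [1, 2, 3]})
arr = np.array([[1, 1], [3, 1], [5, 1]])

# On master this raised ValueError: each new column was assigned the
# row `arr[i]` (length 2) instead of the column `arr[:, i]` (length 3).
df[["c", "d"]] = arr
print(df)
```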
https://api.github.com/repos/pandas-dev/pandas/pulls/37964
2020-11-19T23:35:59Z
2020-11-21T21:09:20Z
2020-11-21T21:09:20Z
2020-11-21T23:42:56Z
CLN: Remove duplicate from MultiIndex.equals
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 95f14bb643744..6e18b29673ca0 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -3312,21 +3312,19 @@ def equals(self, other: object) -> bool: if not isinstance(other, Index): return False + if len(self) != len(other): + return False + if not isinstance(other, MultiIndex): # d-level MultiIndex can equal d-tuple Index if not is_object_dtype(other.dtype): # other cannot contain tuples, so cannot match self return False - elif len(self) != len(other): - return False return array_equivalent(self._values, other._values) if self.nlevels != other.nlevels: return False - if len(self) != len(other): - return False - for i in range(self.nlevels): self_codes = self.codes[i] self_codes = self_codes[self_codes != -1]
- [x] tests passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`

A really small cleanup.
https://api.github.com/repos/pandas-dev/pandas/pulls/37961
2020-11-19T20:11:17Z
2020-11-20T03:09:37Z
2020-11-20T03:09:37Z
2020-11-20T08:18:15Z
CLN: Add comment and clarify if condition in indexing
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 9bfbc22b1e628..3ef4666402d9a 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1821,7 +1821,7 @@ def _setitem_single_block(self, indexer, value, name: str): return indexer = maybe_convert_ix(*indexer) - if isinstance(value, ABCSeries) and name != "iloc" or isinstance(value, dict): + if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict): # TODO(EA): ExtensionBlock.setitem this causes issues with # setting for extensionarrays that store dicts. Need to decide # if it's worth supporting that. @@ -1859,6 +1859,7 @@ def _setitem_with_indexer_missing(self, indexer, value): if index.is_unique: new_indexer = index.get_indexer([new_index[-1]]) if (new_indexer != -1).any(): + # We get only here with loc, so can hard code return self._setitem_with_indexer(new_indexer, value, "loc") # this preserves dtype of the value diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index 84073bbb023a8..bc40079e3169b 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -818,7 +818,7 @@ def test_iloc_setitem_bool_indexer(self, klass): tm.assert_frame_equal(df, expected) @pytest.mark.parametrize("indexer", [[1], slice(1, 2)]) - def test_setitem_iloc_pure_position_based(self, indexer): + def test_iloc_setitem_pure_position_based(self, indexer): # GH#22046 df1 = DataFrame({"a2": [11, 12, 13], "b2": [14, 15, 16]}) df2 = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}) @@ -826,7 +826,7 @@ def test_setitem_iloc_pure_position_based(self, indexer): expected = DataFrame({"a": [1, 2, 3], "b": [11, 12, 13], "c": [7, 8, 9]}) tm.assert_frame_equal(df2, expected) - def test_setitem_iloc_dictionary_value(self): + def test_iloc_setitem_dictionary_value(self): # GH#37728 df = DataFrame({"x": [1, 2], "y": [2, 2]}) rhs = dict(x=9, y=99) @@ -1000,7 +1000,7 @@ def test_iloc_getitem_nonunique(self): ser = Series([0, 1, 2], index=[0, 1, 0]) assert ser.iloc[2] == 2 - def test_setitem_iloc_pure_position_based(self): + def test_iloc_setitem_pure_position_based(self): # GH#22046 ser1 = Series([1, 2, 3]) ser2 = Series([4, 5, 6], index=[1, 0, 2])
Implemented review comments. cc @jbrockmendel
https://api.github.com/repos/pandas-dev/pandas/pulls/37960
2020-11-19T20:06:16Z
2020-11-19T22:43:29Z
2020-11-19T22:43:29Z
2020-11-19T22:44:15Z
CLN: More tests/window/*
diff --git a/pandas/tests/window/conftest.py b/pandas/tests/window/conftest.py index 64e679336abb8..a765f268cfb07 100644 --- a/pandas/tests/window/conftest.py +++ b/pandas/tests/window/conftest.py @@ -275,17 +275,9 @@ def consistency_data(request): return request.param -def _create_series(): - """Internal function to mock Series.""" - arr = np.random.randn(100) - locs = np.arange(20, 40) - arr[locs] = np.NaN - series = Series(arr, index=bdate_range(datetime(2009, 1, 1), periods=100)) - return series - - -def _create_frame(): - """Internal function to mock DataFrame.""" +@pytest.fixture +def frame(): + """Make mocked frame as fixture.""" return DataFrame( np.random.randn(100, 10), index=bdate_range(datetime(2009, 1, 1), periods=100), @@ -293,22 +285,14 @@ def _create_frame(): ) -@pytest.fixture -def frame(): - """Make mocked frame as fixture.""" - return _create_frame() - - @pytest.fixture def series(): """Make mocked series as fixture.""" - return _create_series() - - -@pytest.fixture(params=[_create_series(), _create_frame()]) -def which(request): - """Turn parametrized which as fixture for series and frame""" - return request.param + arr = np.random.randn(100) + locs = np.arange(20, 40) + arr[locs] = np.NaN + series = Series(arr, index=bdate_range(datetime(2009, 1, 1), periods=100)) + return series @pytest.fixture(params=["1 day", timedelta(days=1)]) diff --git a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py index 6e5d7b4df00e1..ac77bfe0dfb48 100644 --- a/pandas/tests/window/test_api.py +++ b/pandas/tests/window/test_api.py @@ -1,8 +1,6 @@ import numpy as np import pytest -import pandas.util._test_decorators as td - import pandas as pd from pandas import DataFrame, Index, Series, Timestamp, concat import pandas._testing as tm @@ -238,30 +236,6 @@ def test_count_nonnumeric_types(): tm.assert_frame_equal(result, expected) -@td.skip_if_no_scipy -@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning") -def test_window_with_args(): - # make sure that we are aggregating window functions correctly with arg - r = Series(np.random.randn(100)).rolling( - window=10, min_periods=1, win_type="gaussian" - ) - expected = concat([r.mean(std=10), r.mean(std=0.01)], axis=1) - expected.columns = ["<lambda>", "<lambda>"] - result = r.aggregate([lambda x: x.mean(std=10), lambda x: x.mean(std=0.01)]) - tm.assert_frame_equal(result, expected) - - def a(x): - return x.mean(std=10) - - def b(x): - return x.mean(std=0.01) - - expected = concat([r.mean(std=10), r.mean(std=0.01)], axis=1) - expected.columns = ["a", "b"] - result = r.aggregate([a, b]) - tm.assert_frame_equal(result, expected) - - def test_preserve_metadata(): # GH 10565 s = Series(np.arange(100), name="foo") diff --git a/pandas/tests/window/test_apply.py b/pandas/tests/window/test_apply.py index b7343d835fa6e..076578f4dc3c4 100644 --- a/pandas/tests/window/test_apply.py +++ b/pandas/tests/window/test_apply.py @@ -1,9 +1,6 @@ import numpy as np import pytest -from pandas.errors import NumbaUtilError -import pandas.util._test_decorators as td - from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range import pandas._testing as tm @@ -133,14 +130,6 @@ def test_invalid_raw_numba(): Series(range(1)).rolling(1).apply(lambda x: x, raw=False, engine="numba") -@td.skip_if_no("numba") -def test_invalid_kwargs_nopython(): - with pytest.raises(NumbaUtilError, match="numba does not support kwargs with"): - Series(range(1)).rolling(1).apply( - lambda x: x, kwargs={"a": 1}, engine="numba", raw=True - ) - - 
@pytest.mark.parametrize("args_kwargs", [[None, {"par": 10}], [(10,), None]]) def test_rolling_apply_args_kwargs(args_kwargs): # GH 33433 diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py index 69cd1d1ba069c..c026f52e94482 100644 --- a/pandas/tests/window/test_ewm.py +++ b/pandas/tests/window/test_ewm.py @@ -15,9 +15,9 @@ def test_doc_string(): df.ewm(com=0.5).mean() -def test_constructor(which): +def test_constructor(frame_or_series): - c = which.ewm + c = frame_or_series(range(5)).ewm # valid c(com=0.5) diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py index ace6848a58c9c..3405502e54e70 100644 --- a/pandas/tests/window/test_expanding.py +++ b/pandas/tests/window/test_expanding.py @@ -19,10 +19,10 @@ def test_doc_string(): @pytest.mark.filterwarnings( "ignore:The `center` argument on `expanding` will be removed in the future" ) -def test_constructor(which): +def test_constructor(frame_or_series): # GH 12669 - c = which.expanding + c = frame_or_series(range(5)).expanding # valid c(min_periods=1) @@ -34,10 +34,10 @@ def test_constructor(which): @pytest.mark.filterwarnings( "ignore:The `center` argument on `expanding` will be removed in the future" ) -def test_constructor_invalid(which, w): +def test_constructor_invalid(frame_or_series, w): # not valid - c = which.expanding + c = frame_or_series(range(5)).expanding msg = "min_periods must be an integer" with pytest.raises(ValueError, match=msg): c(min_periods=w) @@ -118,30 +118,27 @@ def test_expanding_axis(axis_frame): tm.assert_frame_equal(result, expected) -@pytest.mark.parametrize("constructor", [Series, DataFrame]) -def test_expanding_count_with_min_periods(constructor): +def test_expanding_count_with_min_periods(frame_or_series): # GH 26996 - result = constructor(range(5)).expanding(min_periods=3).count() - expected = constructor([np.nan, np.nan, 3.0, 4.0, 5.0]) + result = frame_or_series(range(5)).expanding(min_periods=3).count() + expected = frame_or_series([np.nan, np.nan, 3.0, 4.0, 5.0]) tm.assert_equal(result, expected) -@pytest.mark.parametrize("constructor", [Series, DataFrame]) -def test_expanding_count_default_min_periods_with_null_values(constructor): +def test_expanding_count_default_min_periods_with_null_values(frame_or_series): # GH 26996 values = [1, 2, 3, np.nan, 4, 5, 6] expected_counts = [1.0, 2.0, 3.0, 3.0, 4.0, 5.0, 6.0] - result = constructor(values).expanding().count() - expected = constructor(expected_counts) + result = frame_or_series(values).expanding().count() + expected = frame_or_series(expected_counts) tm.assert_equal(result, expected) -@pytest.mark.parametrize("constructor", [Series, DataFrame]) -def test_expanding_count_with_min_periods_exceeding_series_length(constructor): +def test_expanding_count_with_min_periods_exceeding_series_length(frame_or_series): # GH 25857 - result = constructor(range(5)).expanding(min_periods=6).count() - expected = constructor([np.nan, np.nan, np.nan, np.nan, np.nan]) + result = frame_or_series(range(5)).expanding(min_periods=6).count() + expected = frame_or_series([np.nan, np.nan, np.nan, np.nan, np.nan]) tm.assert_equal(result, expected) @@ -246,10 +243,9 @@ def test_center_deprecate_warning(): df.expanding() -@pytest.mark.parametrize("constructor", ["DataFrame", "Series"]) -def test_expanding_sem(constructor): +def test_expanding_sem(frame_or_series): # GH: 26476 - obj = getattr(pd, constructor)([0, 1, 2]) + obj = frame_or_series([0, 1, 2]) result = obj.expanding().sem() if isinstance(result, DataFrame): 
result = Series(result[0].values) diff --git a/pandas/tests/window/test_groupby.py b/pandas/tests/window/test_groupby.py index c4de112bd6dc0..7a75ff1cff5bc 100644 --- a/pandas/tests/window/test_groupby.py +++ b/pandas/tests/window/test_groupby.py @@ -7,9 +7,8 @@ from pandas.core.groupby.groupby import get_groupby -class TestGrouperGrouping: +class TestRolling: def setup_method(self): - self.series = Series(np.arange(10)) self.frame = DataFrame({"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)}) def test_mutated(self): @@ -152,68 +151,6 @@ def test_rolling_apply_mutability(self): result = g.rolling(window=2).sum() tm.assert_frame_equal(result, expected) - @pytest.mark.parametrize( - "f", ["sum", "mean", "min", "max", "count", "kurt", "skew"] - ) - def test_expanding(self, f): - g = self.frame.groupby("A") - r = g.expanding() - - result = getattr(r, f)() - expected = g.apply(lambda x: getattr(x.expanding(), f)()) - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize("f", ["std", "var"]) - def test_expanding_ddof(self, f): - g = self.frame.groupby("A") - r = g.expanding() - - result = getattr(r, f)(ddof=0) - expected = g.apply(lambda x: getattr(x.expanding(), f)(ddof=0)) - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize( - "interpolation", ["linear", "lower", "higher", "midpoint", "nearest"] - ) - def test_expanding_quantile(self, interpolation): - g = self.frame.groupby("A") - r = g.expanding() - result = r.quantile(0.4, interpolation=interpolation) - expected = g.apply( - lambda x: x.expanding().quantile(0.4, interpolation=interpolation) - ) - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize("f", ["corr", "cov"]) - def test_expanding_corr_cov(self, f): - g = self.frame.groupby("A") - r = g.expanding() - - result = getattr(r, f)(self.frame) - - def func(x): - return getattr(x.expanding(), f)(self.frame) - - expected = g.apply(func) - tm.assert_frame_equal(result, expected) - - result = getattr(r.B, f)(pairwise=True) - - def func(x): - return getattr(x.B.expanding(), f)(pairwise=True) - - expected = g.apply(func) - tm.assert_series_equal(result, expected) - - def test_expanding_apply(self, raw): - g = self.frame.groupby("A") - r = g.expanding() - - # reduction - result = r.apply(lambda x: x.sum(), raw=raw) - expected = g.apply(lambda x: x.expanding().apply(lambda y: y.sum(), raw=raw)) - tm.assert_frame_equal(result, expected) - @pytest.mark.parametrize("expected_value,raw_value", [[1.0, True], [0.0, False]]) def test_groupby_rolling(self, expected_value, raw_value): # GH 31754 @@ -633,6 +570,73 @@ def test_groupby_rolling_index_level_and_column_label(self): tm.assert_frame_equal(result, expected) +class TestExpanding: + def setup_method(self): + self.frame = DataFrame({"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)}) + + @pytest.mark.parametrize( + "f", ["sum", "mean", "min", "max", "count", "kurt", "skew"] + ) + def test_expanding(self, f): + g = self.frame.groupby("A") + r = g.expanding() + + result = getattr(r, f)() + expected = g.apply(lambda x: getattr(x.expanding(), f)()) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("f", ["std", "var"]) + def test_expanding_ddof(self, f): + g = self.frame.groupby("A") + r = g.expanding() + + result = getattr(r, f)(ddof=0) + expected = g.apply(lambda x: getattr(x.expanding(), f)(ddof=0)) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "interpolation", ["linear", "lower", "higher", "midpoint", "nearest"] + ) + def 
test_expanding_quantile(self, interpolation): + g = self.frame.groupby("A") + r = g.expanding() + result = r.quantile(0.4, interpolation=interpolation) + expected = g.apply( + lambda x: x.expanding().quantile(0.4, interpolation=interpolation) + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("f", ["corr", "cov"]) + def test_expanding_corr_cov(self, f): + g = self.frame.groupby("A") + r = g.expanding() + + result = getattr(r, f)(self.frame) + + def func(x): + return getattr(x.expanding(), f)(self.frame) + + expected = g.apply(func) + tm.assert_frame_equal(result, expected) + + result = getattr(r.B, f)(pairwise=True) + + def func(x): + return getattr(x.B.expanding(), f)(pairwise=True) + + expected = g.apply(func) + tm.assert_series_equal(result, expected) + + def test_expanding_apply(self, raw): + g = self.frame.groupby("A") + r = g.expanding() + + # reduction + result = r.apply(lambda x: x.sum(), raw=raw) + expected = g.apply(lambda x: x.expanding().apply(lambda y: y.sum(), raw=raw)) + tm.assert_frame_equal(result, expected) + + class TestEWM: @pytest.mark.parametrize( "method, expected_data", diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py index 3dd09bc4b752a..e890108b22c3e 100644 --- a/pandas/tests/window/test_numba.py +++ b/pandas/tests/window/test_numba.py @@ -1,6 +1,7 @@ import numpy as np import pytest +from pandas.errors import NumbaUtilError import pandas.util._test_decorators as td from pandas import DataFrame, Series, option_context @@ -112,3 +113,11 @@ def f(x): result = s.rolling(2).apply(f, engine=None, raw=True) expected = s.rolling(2).apply(f, engine="numba", raw=True) tm.assert_series_equal(expected, result) + + +@td.skip_if_no("numba", "0.46.0") +def test_invalid_kwargs_nopython(): + with pytest.raises(NumbaUtilError, match="numba does not support kwargs with"): + Series(range(1)).rolling(1).apply( + lambda x: x, kwargs={"a": 1}, engine="numba", raw=True + ) diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index 75e1a771b70ea..1cfbb57d582a3 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -4,7 +4,6 @@ import pytest from pandas.errors import UnsupportedFunctionCall -import pandas.util._test_decorators as td import pandas as pd from pandas import DataFrame, Series, date_range @@ -20,10 +19,10 @@ def test_doc_string(): df.rolling(2, min_periods=1).sum() -def test_constructor(which): +def test_constructor(frame_or_series): # GH 12669 - c = which.rolling + c = frame_or_series(range(5)).rolling # valid c(0) @@ -41,10 +40,10 @@ def test_constructor(which): @pytest.mark.parametrize("w", [2.0, "foo", np.array([2])]) -def test_invalid_constructor(which, w): +def test_invalid_constructor(frame_or_series, w): # not valid - c = which.rolling + c = frame_or_series(range(5)).rolling msg = ( "window must be an integer|" @@ -62,17 +61,6 @@ def test_invalid_constructor(which, w): c(window=2, min_periods=1, center=w) -@td.skip_if_no_scipy -def test_constructor_with_win_type(which): - # GH 13383 - c = which.rolling - - msg = "window must be > 0" - - with pytest.raises(ValueError, match=msg): - c(-1, win_type="boxcar") - - @pytest.mark.parametrize("window", [timedelta(days=3), pd.Timedelta(days=3)]) def test_constructor_with_timedelta_window(window): # GH 15440 @@ -466,24 +454,22 @@ def test_min_periods1(): tm.assert_series_equal(result, expected) -@pytest.mark.parametrize("constructor", [Series, DataFrame]) -def 
test_rolling_count_with_min_periods(constructor): +def test_rolling_count_with_min_periods(frame_or_series): # GH 26996 - result = constructor(range(5)).rolling(3, min_periods=3).count() - expected = constructor([np.nan, np.nan, 3.0, 3.0, 3.0]) + result = frame_or_series(range(5)).rolling(3, min_periods=3).count() + expected = frame_or_series([np.nan, np.nan, 3.0, 3.0, 3.0]) tm.assert_equal(result, expected) -@pytest.mark.parametrize("constructor", [Series, DataFrame]) -def test_rolling_count_default_min_periods_with_null_values(constructor): +def test_rolling_count_default_min_periods_with_null_values(frame_or_series): # GH 26996 values = [1, 2, 3, np.nan, 4, 5, 6] expected_counts = [1.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0] # GH 31302 with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = constructor(values).rolling(3).count() - expected = constructor(expected_counts) + result = frame_or_series(values).rolling(3).count() + expected = frame_or_series(expected_counts) tm.assert_equal(result, expected) @@ -890,10 +876,9 @@ def test_rolling_period_index(index, window, func, values): tm.assert_series_equal(result, expected) -@pytest.mark.parametrize("constructor", ["DataFrame", "Series"]) -def test_rolling_sem(constructor): +def test_rolling_sem(frame_or_series): # GH: 26476 - obj = getattr(pd, constructor)([0, 1, 2]) + obj = frame_or_series([0, 1, 2]) result = obj.rolling(2, min_periods=1).sem() if isinstance(result, DataFrame): result = Series(result[0].values) diff --git a/pandas/tests/window/test_win_type.py b/pandas/tests/window/test_win_type.py index eab62b3383283..091b5914a7c3e 100644 --- a/pandas/tests/window/test_win_type.py +++ b/pandas/tests/window/test_win_type.py @@ -4,14 +4,14 @@ from pandas.errors import UnsupportedFunctionCall import pandas.util._test_decorators as td -import pandas as pd -from pandas import Series +from pandas import DataFrame, Series, concat +import pandas._testing as tm @td.skip_if_no_scipy -def test_constructor(which): +def test_constructor(frame_or_series): # GH 12669 - c = which.rolling + c = frame_or_series(range(5)).rolling # valid c(win_type="boxcar", window=2, min_periods=1) @@ -21,10 +21,10 @@ def test_constructor(which): @pytest.mark.parametrize("w", [2.0, "foo", np.array([2])]) @td.skip_if_no_scipy -def test_invalid_constructor(which, w): +def test_invalid_constructor(frame_or_series, w): # not valid - c = which.rolling + c = frame_or_series(range(5)).rolling with pytest.raises(ValueError, match="min_periods must be an integer"): c(win_type="boxcar", window=2, min_periods=w) with pytest.raises(ValueError, match="center must be a boolean"): @@ -33,16 +33,16 @@ def test_invalid_constructor(which, w): @pytest.mark.parametrize("wt", ["foobar", 1]) @td.skip_if_no_scipy -def test_invalid_constructor_wintype(which, wt): - c = which.rolling +def test_invalid_constructor_wintype(frame_or_series, wt): + c = frame_or_series(range(5)).rolling with pytest.raises(ValueError, match="Invalid win_type"): c(win_type=wt, window=2) @td.skip_if_no_scipy -def test_constructor_with_win_type(which, win_types): +def test_constructor_with_win_type(frame_or_series, win_types): # GH 12669 - c = which.rolling + c = frame_or_series(range(5)).rolling c(win_type=win_types, window=2) @@ -62,7 +62,7 @@ def test_numpy_compat(method): @td.skip_if_no_scipy @pytest.mark.parametrize("arg", ["median", "kurt", "skew"]) def test_agg_function_support(arg): - df = pd.DataFrame({"A": np.arange(5)}) + df = DataFrame({"A": np.arange(5)}) roll = df.rolling(2, 
win_type="triang") msg = f"'{arg}' is not a valid function for 'Window' object" @@ -82,3 +82,38 @@ def test_invalid_scipy_arg(): msg = r"boxcar\(\) got an unexpected" with pytest.raises(TypeError, match=msg): Series(range(3)).rolling(1, win_type="boxcar").mean(foo="bar") + + +@td.skip_if_no_scipy +def test_constructor_with_win_type_invalid(frame_or_series): + # GH 13383 + c = frame_or_series(range(5)).rolling + + msg = "window must be > 0" + + with pytest.raises(ValueError, match=msg): + c(-1, win_type="boxcar") + + +@td.skip_if_no_scipy +@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning") +def test_window_with_args(): + # make sure that we are aggregating window functions correctly with arg + r = Series(np.random.randn(100)).rolling( + window=10, min_periods=1, win_type="gaussian" + ) + expected = concat([r.mean(std=10), r.mean(std=0.01)], axis=1) + expected.columns = ["<lambda>", "<lambda>"] + result = r.aggregate([lambda x: x.mean(std=10), lambda x: x.mean(std=0.01)]) + tm.assert_frame_equal(result, expected) + + def a(x): + return x.mean(std=10) + + def b(x): + return x.mean(std=0.01) + + expected = concat([r.mean(std=10), r.mean(std=0.01)], axis=1) + expected.columns = ["a", "b"] + result = r.aggregate([a, b]) + tm.assert_frame_equal(result, expected)
- Utilize more `frame_or_series` fixtures
- Cleaned `conftest.py`
- Moved some tests to the appropriate location
- Reorg `test_groupby` into `TestRolling` and `TestExpanding` classes
https://api.github.com/repos/pandas-dev/pandas/pulls/37959
2020-11-19T19:43:09Z
2020-11-19T20:47:09Z
2020-11-19T20:47:09Z
2020-11-19T20:47:37Z
BUG: DataFrame.to_html ignores formatters
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index cea42cbffa906..570c9999843c8 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -654,6 +654,7 @@ I/O - :meth:`to_excel` and :meth:`to_markdown` support writing to fsspec URLs such as S3 and Google Cloud Storage (:issue:`33987`) - Bug in :meth:`read_fw` was not skipping blank lines (even with ``skip_blank_lines=True``) (:issue:`37758`) - :meth:`read_fwf` was inferring compression with ``compression=None`` which was not consistent with the other :meth:``read_*`` functions (:issue:`37909`) +- :meth:`DataFrame.to_html` was ignoring ``formatters`` argument for ``ExtensionDtype`` columns (:issue:`36525`) Period ^^^^^^ diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 8ede35912a492..082c539d034eb 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1537,7 +1537,9 @@ class ExtensionArrayFormatter(GenericArrayFormatter): def _format_strings(self) -> List[str]: values = extract_array(self.values, extract_numpy=True) - formatter = values._formatter(boxed=True) + formatter = self.formatter + if formatter is None: + formatter = values._formatter(boxed=True) if is_categorical_dtype(values.dtype): # Categorical is special for now, so that we can preserve tzinfo @@ -1553,7 +1555,9 @@ def _format_strings(self) -> List[str]: digits=self.digits, space=self.space, justify=self.justify, + decimal=self.decimal, leading_space=self.leading_space, + quoting=self.quoting, ) return fmt_values diff --git a/pandas/tests/io/formats/data/html/various_dtypes_formatted.html b/pandas/tests/io/formats/data/html/various_dtypes_formatted.html new file mode 100644 index 0000000000000..7d2ede3379213 --- /dev/null +++ b/pandas/tests/io/formats/data/html/various_dtypes_formatted.html @@ -0,0 +1,36 @@ +<table border="1" class="dataframe"> + <thead> + <tr style="text-align: right;"> + <th></th> + <th>i</th> + <th>f</th> + <th>I</th> + <th>s</th> + <th>b</th> + <th>c</th> + <th>o</th> + </tr> + </thead> + <tbody> + <tr> + <th>0</th> + <td>formatted</td> + <td>formatted</td> + <td>formatted</td> + <td>formatted</td> + <td>formatted</td> + <td>formatted</td> + <td>formatted</td> + </tr> + <tr> + <th>1</th> + <td>formatted</td> + <td>formatted</td> + <td>formatted</td> + <td>formatted</td> + <td>formatted</td> + <td>formatted</td> + <td>formatted</td> + </tr> + </tbody> +</table> diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py index f4f963d268aeb..aaadc965aca52 100644 --- a/pandas/tests/io/formats/test_to_html.py +++ b/pandas/tests/io/formats/test_to_html.py @@ -247,6 +247,21 @@ def test_to_html_multiindex_odd_even_truncate(max_rows, expected, datapath): {"hod": lambda x: x.strftime("%H:%M")}, "datetime64_hourformatter", ), + ( + DataFrame( + { + "i": pd.Series([1, 2], dtype="int64"), + "f": pd.Series([1, 2], dtype="float64"), + "I": pd.Series([1, 2], dtype="Int64"), + "s": pd.Series([1, 2], dtype="string"), + "b": pd.Series([True, False], dtype="boolean"), + "c": pd.Series(["a", "b"], dtype=pd.CategoricalDtype(["a", "b"])), + "o": pd.Series([1, "2"], dtype=object), + } + ), + [lambda x: "formatted"] * 7, + "various_dtypes_formatted", + ), ], ) def test_to_html_formatters(df, formatters, expected, datapath):
- [x] closes #36525
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
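A minimal sketch of the fix, mirroring the new test case for a single extension-dtype column:

```python
import pandas as pd

df = pd.DataFrame({"I": pd.Series([1, 2], dtype="Int64")})

# Before the fix, the ExtensionArray's own formatter silently overrode
# the user-supplied one; now each cell renders via the passed formatter.
html = df.to_html(formatters=[lambda x: "formatted"])
assert "formatted" in html
```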
https://api.github.com/repos/pandas-dev/pandas/pulls/37958
2020-11-19T18:46:30Z
2020-11-21T00:01:07Z
2020-11-21T00:01:07Z
2020-11-26T08:06:02Z
DOC: re-use storage_options
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index abf9b3d8823aa..27713b5bde201 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -118,7 +118,7 @@ ) from pandas.core.dtypes.missing import isna, notna -from pandas.core import algorithms, common as com, nanops, ops +from pandas.core import algorithms, common as com, generic, nanops, ops from pandas.core.accessor import CachedAccessor from pandas.core.aggregation import ( aggregate, @@ -2066,6 +2066,7 @@ def _from_arrays( ) return cls(mgr) + @doc(storage_options=generic._shared_docs["storage_options"]) @deprecate_kwarg(old_arg_name="fname", new_arg_name="path") def to_stata( self, @@ -2118,7 +2119,7 @@ def to_stata( variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. - version : {114, 117, 118, 119, None}, default 114 + version : {{114, 117, 118, 119, None}}, default 114 Version to use in the output dta file. Set to None to let pandas decide between 118 or 119 formats depending on the number of columns in the frame. Version 114 can be read by Stata 10 and @@ -2147,23 +2148,17 @@ def to_stata( compression : str or dict, default 'infer' For on-the-fly compression of the output dta. If string, specifies compression mode. If dict, value at key 'method' specifies - compression mode. Compression mode must be one of {'infer', 'gzip', - 'bz2', 'zip', 'xz', None}. If compression mode is 'infer' and + compression mode. Compression mode must be one of {{'infer', 'gzip', + 'bz2', 'zip', 'xz', None}}. If compression mode is 'infer' and `fname` is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no - compression). If dict and compression mode is one of {'zip', - 'gzip', 'bz2'}, or inferred as one of the above, other entries + compression). If dict and compression mode is one of {{'zip', + 'gzip', 'bz2'}}, or inferred as one of the above, other entries passed as additional compression options. .. versionadded:: 1.1.0 - storage_options : dict, optional - Extra options that make sense for a particular storage connection, e.g. - host, port, username, password, etc., if using a URL that will - be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error - will be raised if providing this argument with a local path or - a file-like buffer. See the fsspec and backend storage implementation - docs for the set of allowed keys and values. + {storage_options} .. versionadded:: 1.2.0 @@ -2186,9 +2181,9 @@ def to_stata( Examples -------- - >>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon', + >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], - ... 'speed': [350, 18, 361, 15]}) + ... 
'speed': [350, 18, 361, 15]}}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ if version not in (114, 117, 118, 119, None): @@ -2255,6 +2250,7 @@ def to_feather(self, path: FilePathOrBuffer[AnyStr], **kwargs) -> None: @doc( Series.to_markdown, klass=_shared_doc_kwargs["klass"], + storage_options=_shared_docs["storage_options"], examples="""Examples -------- >>> df = pd.DataFrame( @@ -2307,6 +2303,7 @@ def to_markdown( handles.handle.writelines(result) return None + @doc(storage_options=generic._shared_docs["storage_options"]) @deprecate_kwarg(old_arg_name="fname", new_arg_name="path") def to_parquet( self, @@ -2340,12 +2337,12 @@ def to_parquet( Previously this was "fname" - engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' + engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. - compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' + compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. @@ -2365,13 +2362,7 @@ def to_parquet( .. versionadded:: 0.24.0 - storage_options : dict, optional - Extra options that make sense for a particular storage connection, e.g. - host, port, username, password, etc., if using a URL that will - be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error - will be raised if providing this argument with a local path or - a file-like buffer. See the fsspec and backend storage implementation - docs for the set of allowed keys and values. + {storage_options} .. versionadded:: 1.2.0 @@ -2398,7 +2389,7 @@ def to_parquet( Examples -------- - >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]}) + >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 73f1e7127dca4..8b71ff83400d1 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2024,7 +2024,7 @@ def _repr_data_resource_(self): # I/O Methods @final - @doc(klass="object") + @doc(klass="object", storage_options=_shared_docs["storage_options"]) def to_excel( self, excel_writer, @@ -2101,10 +2101,7 @@ def to_excel( freeze_panes : tuple of int (length 2), optional Specifies the one-based bottommost row and rightmost column that is to be frozen. - storage_options : dict, optional - Extra options that make sense for a particular storage connection, e.g. - host, port, username, password, etc., if using a URL that will - be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". + {storage_options} .. versionadded:: 1.2.0 @@ -2185,6 +2182,7 @@ def to_excel( ) @final + @doc(storage_options=_shared_docs["storage_options"]) def to_json( self, path_or_buf: Optional[FilePathOrBuffer] = None, @@ -2217,27 +2215,27 @@ def to_json( * Series: - default is 'index' - - allowed values are: {'split', 'records', 'index', 'table'}. + - allowed values are: {{'split', 'records', 'index', 'table'}}. * DataFrame: - default is 'columns' - - allowed values are: {'split', 'records', 'index', 'columns', - 'values', 'table'}. 
+ - allowed values are: {{'split', 'records', 'index', 'columns', + 'values', 'table'}}. * The format of the JSON string: - - 'split' : dict like {'index' -> [index], 'columns' -> [columns], - 'data' -> [values]} - - 'records' : list like [{column -> value}, ... , {column -> value}] - - 'index' : dict like {index -> {column -> value}} - - 'columns' : dict like {column -> {index -> value}} + - 'split' : dict like {{'index' -> [index], 'columns' -> [columns], + 'data' -> [values]}} + - 'records' : list like [{{column -> value}}, ... , {{column -> value}}] + - 'index' : dict like {{index -> {{column -> value}}}} + - 'columns' : dict like {{column -> {{index -> value}}}} - 'values' : just the values array - - 'table' : dict like {'schema': {schema}, 'data': {data}} + - 'table' : dict like {{'schema': {{schema}}, 'data': {{data}}}} Describing the data, where data component is like ``orient='records'``. - date_format : {None, 'epoch', 'iso'} + date_format : {{None, 'epoch', 'iso'}} Type of date conversion. 'epoch' = epoch milliseconds, 'iso' = ISO8601. The default depends on the `orient`. For ``orient='table'``, the default is 'iso'. For all other orients, @@ -2260,7 +2258,7 @@ def to_json( throw ValueError if incorrect 'orient' since others are not list like. - compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None} + compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}} A string representing the compression to use in the output file, only used when the first argument is a filename. By default, the @@ -2277,13 +2275,7 @@ def to_json( .. versionadded:: 1.0.0 - storage_options : dict, optional - Extra options that make sense for a particular storage connection, e.g. - host, port, username, password, etc., if using a URL that will - be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error - will be raised if providing this argument with a local path or - a file-like buffer. See the fsspec and backend storage implementation - docs for the set of allowed keys and values. + {storage_options} .. versionadded:: 1.2.0 @@ -2320,7 +2312,7 @@ def to_json( >>> result = df.to_json(orient="split") >>> parsed = json.loads(result) >>> json.dumps(parsed, indent=4) # doctest: +SKIP - { + {{ "columns": [ "col 1", "col 2" @@ -2339,7 +2331,7 @@ def to_json( "d" ] ] - } + }} Encoding/decoding a Dataframe using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. 
@@ -2348,14 +2340,14 @@ def to_json( >>> parsed = json.loads(result) >>> json.dumps(parsed, indent=4) # doctest: +SKIP [ - { + {{ "col 1": "a", "col 2": "b" - }, - { + }}, + {{ "col 1": "c", "col 2": "d" - } + }} ] Encoding/decoding a Dataframe using ``'index'`` formatted JSON: @@ -2363,32 +2355,32 @@ def to_json( >>> result = df.to_json(orient="index") >>> parsed = json.loads(result) >>> json.dumps(parsed, indent=4) # doctest: +SKIP - { - "row 1": { + {{ + "row 1": {{ "col 1": "a", "col 2": "b" - }, - "row 2": { + }}, + "row 2": {{ "col 1": "c", "col 2": "d" - } - } + }} + }} Encoding/decoding a Dataframe using ``'columns'`` formatted JSON: >>> result = df.to_json(orient="columns") >>> parsed = json.loads(result) >>> json.dumps(parsed, indent=4) # doctest: +SKIP - { - "col 1": { + {{ + "col 1": {{ "row 1": "a", "row 2": "c" - }, - "col 2": { + }}, + "col 2": {{ "row 1": "b", "row 2": "d" - } - } + }} + }} Encoding/decoding a Dataframe using ``'values'`` formatted JSON: @@ -2411,40 +2403,40 @@ def to_json( >>> result = df.to_json(orient="table") >>> parsed = json.loads(result) >>> json.dumps(parsed, indent=4) # doctest: +SKIP - { - "schema": { + {{ + "schema": {{ "fields": [ - { + {{ "name": "index", "type": "string" - }, - { + }}, + {{ "name": "col 1", "type": "string" - }, - { + }}, + {{ "name": "col 2", "type": "string" - } + }} ], "primaryKey": [ "index" ], "pandas_version": "0.20.0" - }, + }}, "data": [ - { + {{ "index": "row 1", "col 1": "a", "col 2": "b" - }, - { + }}, + {{ "index": "row 2", "col 1": "c", "col 2": "d" - } + }} ] - } + }} """ from pandas.io import json @@ -2783,6 +2775,7 @@ def to_sql( ) @final + @doc(storage_options=_shared_docs["storage_options"]) def to_pickle( self, path, @@ -2797,7 +2790,7 @@ def to_pickle( ---------- path : str File path where the pickled object will be stored. - compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \ + compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, \ default 'infer' A string representing the compression to use in the output file. By default, infers from the file extension in specified path. @@ -2809,13 +2802,7 @@ def to_pickle( .. [1] https://docs.python.org/3/library/pickle.html. - storage_options : dict, optional - Extra options that make sense for a particular storage connection, e.g. - host, port, username, password, etc., if using a URL that will - be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error - will be raised if providing this argument with a local path or - a file-like buffer. See the fsspec and backend storage implementation - docs for the set of allowed keys and values. + {storage_options} .. versionadded:: 1.2.0 @@ -2828,7 +2815,7 @@ def to_pickle( Examples -------- - >>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)}) + >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) >>> original_df foo bar 0 0 5 @@ -3193,6 +3180,7 @@ def to_latex( ) @final + @doc(storage_options=_shared_docs["storage_options"]) def to_csv( self, path_or_buf: Optional[FilePathOrBuffer] = None, @@ -3272,11 +3260,11 @@ def to_csv( compression : str or dict, default 'infer' If str, represents compression mode. If dict, value at 'method' is the compression mode. Compression mode may be any of the following - possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If + possible values: {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}. 
If compression mode is 'infer' and `path_or_buf` is path-like, then detect compression mode from the following extensions: '.gz', '.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given - and mode is one of {'zip', 'gzip', 'bz2'}, or inferred as + and mode is one of {{'zip', 'gzip', 'bz2'}}, or inferred as one of the above, other entries passed as additional compression options. @@ -3333,13 +3321,7 @@ def to_csv( .. versionadded:: 1.1.0 - storage_options : dict, optional - Extra options that make sense for a particular storage connection, e.g. - host, port, username, password, etc., if using a URL that will - be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error - will be raised if providing this argument with a local path or - a file-like buffer. See the fsspec and backend storage implementation - docs for the set of allowed keys and values. + {storage_options} .. versionadded:: 1.2.0 @@ -3356,9 +3338,9 @@ def to_csv( Examples -------- - >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'], + >>> df = pd.DataFrame({{'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], - ... 'weapon': ['sai', 'bo staff']}) + ... 'weapon': ['sai', 'bo staff']}}) >>> df.to_csv(index=False) 'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n' diff --git a/pandas/core/series.py b/pandas/core/series.py index 42a87b003f634..d59e72a04209c 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1428,6 +1428,7 @@ def to_string( @doc( klass=_shared_doc_kwargs["klass"], + storage_options=generic._shared_docs["storage_options"], examples=dedent( """ Examples @@ -1466,14 +1467,7 @@ def to_markdown( Add index (row) labels. .. versionadded:: 1.1.0 - - storage_options : dict, optional - Extra options that make sense for a particular storage connection, e.g. - host, port, username, password, etc., if using a URL that will - be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error - will be raised if providing this argument with a local path or - a file-like buffer. See the fsspec and backend storage implementation - docs for the set of allowed keys and values. + {storage_options} .. versionadded:: 1.2.0 diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py index cc918c27b5c2e..7085761ca4d99 100644 --- a/pandas/core/shared_docs.py +++ b/pandas/core/shared_docs.py @@ -325,3 +325,13 @@ 1 1.000000 2.718282 2 1.414214 7.389056 """ + +_shared_docs[ + "storage_options" +] = """storage_options : dict, optional + Extra options that make sense for a particular storage connection, e.g. + host, port, username, password, etc., if using a URL that will + be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error + will be raised if providing this argument with a non-fsspec URL. 
+ See the fsspec and backend storage implementation docs for the set of + allowed keys and values.""" diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py index 9e63976bf8cf9..422677771b4d0 100644 --- a/pandas/io/feather_format.py +++ b/pandas/io/feather_format.py @@ -4,12 +4,15 @@ from pandas._typing import FilePathOrBuffer, StorageOptions from pandas.compat._optional import import_optional_dependency +from pandas.util._decorators import doc from pandas import DataFrame, Int64Index, RangeIndex +from pandas.core import generic from pandas.io.common import get_handle +@doc(storage_options=generic._shared_docs["storage_options"]) def to_feather( df: DataFrame, path: FilePathOrBuffer[AnyStr], @@ -23,13 +26,7 @@ def to_feather( ---------- df : DataFrame path : string file path, or file-like object - storage_options : dict, optional - Extra options that make sense for a particular storage connection, e.g. - host, port, username, password, etc., if using a URL that will - be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error - will be raised if providing this argument with a local path or - a file-like buffer. See the fsspec and backend storage implementation - docs for the set of allowed keys and values. + {storage_options} .. versionadded:: 1.2.0 @@ -83,6 +80,7 @@ def to_feather( feather.write_feather(df, handles.handle, **kwargs) +@doc(storage_options=generic._shared_docs["storage_options"]) def read_feather( path, columns=None, use_threads: bool = True, storage_options: StorageOptions = None ): @@ -111,13 +109,7 @@ def read_feather( Whether to parallelize reading using multiple threads. .. versionadded:: 0.24.0 - storage_options : dict, optional - Extra options that make sense for a particular storage connection, e.g. - host, port, username, password, etc., if using a URL that will - be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error - will be raised if providing this argument with a local path or - a file-like buffer. See the fsspec and backend storage implementation - docs for the set of allowed keys and values. + {storage_options} .. versionadded:: 1.2.0 diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index fe471c6f6f9ac..842cf4b555b3e 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -11,12 +11,14 @@ import numpy as np from pandas._typing import Label, StorageOptions +from pandas.util._decorators import doc from pandas.core.dtypes import missing from pandas.core.dtypes.common import is_float, is_scalar from pandas.core.dtypes.generic import ABCIndex from pandas import DataFrame, Index, MultiIndex, PeriodIndex +from pandas.core import generic import pandas.core.common as com from pandas.io.formats.css import CSSResolver, CSSWarning @@ -776,6 +778,7 @@ def get_formatted_cells(self): cell.val = self._format_value(cell.val) yield cell + @doc(storage_options=generic._shared_docs["storage_options"]) def write( self, writer, @@ -802,10 +805,7 @@ def write( write engine to use if writer is a path - you can also set this via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and ``io.excel.xlsm.writer``. - storage_options : dict, optional - Extra options that make sense for a particular storage connection, e.g. - host, port, username, password, etc., if using a URL that will - be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". + {storage_options} .. 
versionadded:: 1.2.0 """ diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 2f3416cbf2d87..4b7a5e76cb475 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -1,7 +1,6 @@ """ Module for applying conditional formatting to DataFrames and Series. """ - from collections import defaultdict from contextlib import contextmanager import copy @@ -33,6 +32,7 @@ import pandas as pd from pandas.api.types import is_dict_like, is_list_like +from pandas.core import generic import pandas.core.common as com from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame @@ -204,7 +204,11 @@ def _repr_html_(self) -> str: """ return self.render() - @doc(NDFrame.to_excel, klass="Styler") + @doc( + NDFrame.to_excel, + klass="Styler", + storage_options=generic._shared_docs["storage_options"], + ) def to_excel( self, excel_writer, diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 1f62b6a8096a8..8129d58d5cb34 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -16,11 +16,12 @@ StorageOptions, ) from pandas.errors import AbstractMethodError -from pandas.util._decorators import deprecate_kwarg, deprecate_nonkeyword_arguments +from pandas.util._decorators import deprecate_kwarg, deprecate_nonkeyword_arguments, doc from pandas.core.dtypes.common import ensure_str, is_period_dtype from pandas import DataFrame, MultiIndex, Series, isna, to_datetime +from pandas.core import generic from pandas.core.construction import create_series_with_explicit_dtype from pandas.core.generic import NDFrame from pandas.core.reshape.concat import concat @@ -286,6 +287,7 @@ def obj_to_write(self) -> Union[NDFrame, Mapping[IndexLabel, Any]]: return {"schema": self.schema, "data": self.obj} +@doc(storage_options=generic._shared_docs["storage_options"]) @deprecate_kwarg(old_arg_name="numpy", new_arg_name=None) @deprecate_nonkeyword_arguments( version="2.0", allowed_args=["path_or_buf"], stacklevel=3 @@ -332,11 +334,11 @@ def read_json( The set of possible orients is: - ``'split'`` : dict like - ``{index -> [index], columns -> [columns], data -> [values]}`` + ``{{index -> [index], columns -> [columns], data -> [values]}}`` - ``'records'`` : list like - ``[{column -> value}, ... , {column -> value}]`` - - ``'index'`` : dict like ``{index -> {column -> value}}`` - - ``'columns'`` : dict like ``{column -> {index -> value}}`` + ``[{{column -> value}}, ... , {{column -> value}}]`` + - ``'index'`` : dict like ``{{index -> {{column -> value}}}}`` + - ``'columns'`` : dict like ``{{column -> {{index -> value}}}}`` - ``'values'`` : just the values array The allowed and default values depend on the value @@ -344,21 +346,21 @@ def read_json( * when ``typ == 'series'``, - - allowed orients are ``{'split','records','index'}`` + - allowed orients are ``{{'split','records','index'}}`` - default is ``'index'`` - The Series index must be unique for orient ``'index'``. * when ``typ == 'frame'``, - - allowed orients are ``{'split','records','index', - 'columns','values', 'table'}`` + - allowed orients are ``{{'split','records','index', + 'columns','values', 'table'}}`` - default is ``'columns'`` - The DataFrame index must be unique for orients ``'index'`` and ``'columns'``. - The DataFrame columns must be unique for orients ``'index'``, ``'columns'``, and ``'records'``. - typ : {'frame', 'series'}, default 'frame' + typ : {{'frame', 'series'}}, default 'frame' The type of object to recover. 
dtype : bool or dict, default None @@ -435,7 +437,7 @@ def read_json( This can only be passed if `lines=True`. If this is None, the file will be read into memory all at once. - compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' + compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer' For on-the-fly decompression of on-disk data. If 'infer', then use gzip, bz2, zip or xz if path_or_buf is a string ending in '.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression @@ -449,13 +451,7 @@ def read_json( .. versionadded:: 1.1 - storage_options : dict, optional - Extra options that make sense for a particular storage connection, e.g. - host, port, username, password, etc., if using a URL that will - be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error - will be raised if providing this argument with a local path or - a file-like buffer. See the fsspec and backend storage implementation - docs for the set of allowed keys and values. + {storage_options} .. versionadded:: 1.2.0 @@ -489,9 +485,9 @@ def read_json( Encoding/decoding a Dataframe using ``'split'`` formatted JSON: >>> df.to_json(orient='split') - '{"columns":["col 1","col 2"], + '{{"columns":["col 1","col 2"], "index":["row 1","row 2"], - "data":[["a","b"],["c","d"]]}' + "data":[["a","b"],["c","d"]]}}' >>> pd.read_json(_, orient='split') col 1 col 2 row 1 a b @@ -500,7 +496,7 @@ def read_json( Encoding/decoding a Dataframe using ``'index'`` formatted JSON: >>> df.to_json(orient='index') - '{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}' + '{{"row 1":{{"col 1":"a","col 2":"b"}},"row 2":{{"col 1":"c","col 2":"d"}}}}' >>> pd.read_json(_, orient='index') col 1 col 2 row 1 a b @@ -510,7 +506,7 @@ def read_json( Note that index labels are not preserved with this encoding. 
>>> df.to_json(orient='records') - '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]' + '[{{"col 1":"a","col 2":"b"}},{{"col 1":"c","col 2":"d"}}]' >>> pd.read_json(_, orient='records') col 1 col 2 0 a b @@ -519,13 +515,13 @@ def read_json( Encoding with Table Schema >>> df.to_json(orient='table') - '{"schema": {"fields": [{"name": "index", "type": "string"}, - {"name": "col 1", "type": "string"}, - {"name": "col 2", "type": "string"}], + '{{"schema": {{"fields": [{{"name": "index", "type": "string"}}, + {{"name": "col 1", "type": "string"}}, + {{"name": "col 2", "type": "string"}}], "primaryKey": "index", - "pandas_version": "0.20.0"}, - "data": [{"index": "row 1", "col 1": "a", "col 2": "b"}, - {"index": "row 2", "col 1": "c", "col 2": "d"}]}' + "pandas_version": "0.20.0"}}, + "data": [{{"index": "row 1", "col 1": "a", "col 2": "b"}}, + {{"index": "row 2", "col 1": "c", "col 2": "d"}}]}}' """ if orient == "table" and dtype: raise ValueError("cannot pass both dtype and orient='table'") diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 10c70b9a5c43a..a19b132a7891d 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -8,8 +8,10 @@ from pandas._typing import FilePathOrBuffer, StorageOptions from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError +from pandas.util._decorators import doc from pandas import DataFrame, MultiIndex, get_option +from pandas.core import generic from pandas.io.common import IOHandles, get_handle, is_fsspec_url, stringify_path @@ -280,6 +282,7 @@ def read( return result +@doc(storage_options=generic._shared_docs["storage_options"]) def to_parquet( df: DataFrame, path: Optional[FilePathOrBuffer] = None, @@ -306,12 +309,12 @@ def to_parquet( .. versionchanged:: 1.2.0 - engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' + engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. - compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' + compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If @@ -331,13 +334,7 @@ def to_parquet( .. versionadded:: 0.24.0 - storage_options : dict, optional - Extra options that make sense for a particular storage connection, e.g. - host, port, username, password, etc., if using a URL that will - be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error - will be raised if providing this argument with a local path or - a file-like buffer. See the fsspec and backend storage implementation - docs for the set of allowed keys and values + {storage_options} .. versionadded:: 1.2.0 diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 8d9787a9c8c9e..eb29f6c0d0c48 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -62,7 +62,7 @@ from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.missing import isna -from pandas.core import algorithms +from pandas.core import algorithms, generic from pandas.core.arrays import Categorical from pandas.core.frame import DataFrame from pandas.core.indexes.api import ( @@ -355,13 +355,7 @@ .. 
versionchanged:: 1.2 -storage_options : dict, optional - Extra options that make sense for a particular storage connection, e.g. - host, port, username, password, etc., if using a URL that will - be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error - will be raised if providing this argument with a local path or - a file-like buffer. See the fsspec and backend storage implementation - docs for the set of allowed keys and values. +{storage_options} .. versionadded:: 1.2 @@ -532,6 +526,7 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds): func_name="read_csv", summary="Read a comma-separated values (csv) file into DataFrame.", _default_sep="','", + storage_options=generic._shared_docs["storage_options"], ) ) def read_csv( @@ -611,6 +606,7 @@ def read_csv( func_name="read_table", summary="Read general delimited file into DataFrame.", _default_sep=r"'\\t' (tab-stop)", + storage_options=generic._shared_docs["storage_options"], ) ) def read_table( diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index 7d09029aded1b..a5507259b7b6a 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -5,10 +5,14 @@ from pandas._typing import CompressionOptions, FilePathOrBuffer, StorageOptions from pandas.compat import pickle_compat as pc +from pandas.util._decorators import doc + +from pandas.core import generic from pandas.io.common import get_handle +@doc(storage_options=generic._shared_docs["storage_options"]) def to_pickle( obj: Any, filepath_or_buffer: FilePathOrBuffer, @@ -29,7 +33,7 @@ def to_pickle( .. versionchanged:: 1.0.0 Accept URL. URL has to be of S3 or GCS. - compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' + compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer' If 'infer' and 'path_or_url' is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no compression) If 'infer' and 'path_or_url' is not path-like, then use @@ -43,13 +47,7 @@ def to_pickle( protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. - storage_options : dict, optional - Extra options that make sense for a particular storage connection, e.g. - host, port, username, password, etc., if using a URL that will - be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error - will be raised if providing this argument with a local path or - a file-like buffer. See the fsspec and backend storage implementation - docs for the set of allowed keys and values. + {storage_options} .. versionadded:: 1.2.0 @@ -64,7 +62,7 @@ def to_pickle( Examples -------- - >>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)}) + >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) >>> original_df foo bar 0 0 5 @@ -99,6 +97,7 @@ def to_pickle( pickle.dump(obj, handles.handle, protocol=protocol) # type: ignore[arg-type] +@doc(storage_options=generic._shared_docs["storage_options"]) def read_pickle( filepath_or_buffer: FilePathOrBuffer, compression: CompressionOptions = "infer", @@ -120,19 +119,13 @@ def read_pickle( .. versionchanged:: 1.0.0 Accept URL. URL is not limited to S3 and GCS. 
- compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' + compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer' If 'infer' and 'path_or_url' is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no compression) If 'infer' and 'path_or_url' is not path-like, then use None (= no decompression). - storage_options : dict, optional - Extra options that make sense for a particular storage connection, e.g. - host, port, username, password, etc., if using a URL that will - be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error - will be raised if providing this argument with a local path or - a file-like buffer. See the fsspec and backend storage implementation - docs for the set of allowed keys and values. + {storage_options} .. versionadded:: 1.2.0 @@ -154,7 +147,7 @@ def read_pickle( Examples -------- - >>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)}) + >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) >>> original_df foo bar 0 0 5 diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 1f8d9b6213a71..e8b61c3c40291 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -31,7 +31,7 @@ Label, StorageOptions, ) -from pandas.util._decorators import Appender +from pandas.util._decorators import Appender, doc from pandas.core.dtypes.common import ( ensure_object, @@ -49,6 +49,7 @@ to_datetime, to_timedelta, ) +from pandas.core import generic from pandas.core.frame import DataFrame from pandas.core.indexes.base import Index from pandas.core.series import Series @@ -2060,6 +2061,7 @@ def _dtype_to_default_stata_fmt( raise NotImplementedError(f"Data type {dtype} not supported.") +@doc(storage_options=generic._shared_docs["storage_options"]) class StataWriter(StataParser): """ A class for writing Stata binary dta files @@ -2094,22 +2096,16 @@ class StataWriter(StataParser): compression : str or dict, default 'infer' For on-the-fly compression of the output dta. If string, specifies compression mode. If dict, value at key 'method' specifies compression - mode. Compression mode must be one of {'infer', 'gzip', 'bz2', 'zip', - 'xz', None}. If compression mode is 'infer' and `fname` is path-like, + mode. Compression mode must be one of {{'infer', 'gzip', 'bz2', 'zip', + 'xz', None}}. If compression mode is 'infer' and `fname` is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no compression). If dict and compression - mode is one of {'zip', 'gzip', 'bz2'}, or inferred as one of the above, + mode is one of {{'zip', 'gzip', 'bz2'}}, or inferred as one of the above, other entries passed as additional compression options. .. versionadded:: 1.1.0 - storage_options : dict, optional - Extra options that make sense for a particular storage connection, e.g. - host, port, username, password, etc., if using a URL that will - be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error - will be raised if providing this argument with a local path or - a file-like buffer. See the fsspec and backend storage implementation - docs for the set of allowed keys and values + {storage_options} .. 
versionadded:: 1.2.0 @@ -2137,14 +2133,14 @@ class StataWriter(StataParser): >>> writer.write_file() Directly write a zip file - >>> compression = {"method": "zip", "archive_name": "data_file.dta"} + >>> compression = {{"method": "zip", "archive_name": "data_file.dta"}} >>> writer = StataWriter('./data_file.zip', data, compression=compression) >>> writer.write_file() Save a DataFrame with dates >>> from datetime import datetime >>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date']) - >>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'}) + >>> writer = StataWriter('./date_data_file.dta', data, {{'date' : 'tw'}}) >>> writer.write_file() """
The `{{ }}` are necessary so that `format` doesn't complain about missing keys.

TODO:
- `compression` can be re-used
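As a small illustration of the escaping (the template text below is made up, not one of the real shared docstrings):

```python
# str.format treats single braces as replacement fields, so literal
# braces in a docstring template must be doubled to survive formatting.
template = "compression : {{'infer', 'gzip', None}}\n{storage_options}"
print(template.format(storage_options="storage_options : dict, optional"))
# With single braces, format() would raise KeyError: "'infer', 'gzip', None".
```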
https://api.github.com/repos/pandas-dev/pandas/pulls/37953
2020-11-19T06:03:21Z
2020-11-19T21:16:54Z
2020-11-19T21:16:54Z
2020-11-19T21:17:00Z
TST: add error message match for raise in test_datetimelike.py GH30999
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 94a5406eb1f8f..159f52a4c7c25 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -1,3 +1,4 @@ +import re from typing import Type, Union import numpy as np @@ -302,10 +303,22 @@ def test_searchsorted_castable_strings(self, arr1d, box): expected = np.array([1, 2], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) - with pytest.raises(TypeError): + with pytest.raises( + TypeError, + match=re.escape( + f"value should be a '{arr1d._scalar_type.__name__}', 'NaT', " + "or array of those. Got 'str' instead." + ), + ): arr.searchsorted("foo") - with pytest.raises(TypeError): + with pytest.raises( + TypeError, + match=re.escape( + f"value should be a '{arr1d._scalar_type.__name__}', 'NaT', " + "or array of those. Got 'StringArray' instead." + ), + ): arr.searchsorted([str(arr[1]), "baz"]) def test_getitem_2d(self, arr1d):
Reference #30999

- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
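The `re.escape` wrapping matters because `pytest.raises(match=...)` interprets its argument as a regular expression; a small, self-contained illustration (the message text here is arbitrary):

```python
import re

import pytest


def raise_typeerror():
    raise TypeError("value should be a 'Timestamp'. Got 'str' instead.")


# Without re.escape, the dot in the message would match any character,
# making the pattern looser than intended.
with pytest.raises(TypeError, match=re.escape("Got 'str' instead.")):
    raise_typeerror()
```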
https://api.github.com/repos/pandas-dev/pandas/pulls/37952
2020-11-19T05:49:50Z
2020-11-20T09:15:44Z
2020-11-20T09:15:44Z
2020-11-20T19:38:53Z
CLN: test_ewm_rolling_consistency.py
diff --git a/pandas/tests/window/common.py b/pandas/tests/window/common.py deleted file mode 100644 index 7c8c9de40f7c5..0000000000000 --- a/pandas/tests/window/common.py +++ /dev/null @@ -1,147 +0,0 @@ -import numpy as np - -from pandas import Series -import pandas._testing as tm - - -def moments_consistency_mock_mean(x, mean, mock_mean): - mean_x = mean(x) - # check that correlation of a series with itself is either 1 or NaN - - if mock_mean: - # check that mean equals mock_mean - expected = mock_mean(x) - tm.assert_equal(mean_x, expected.astype("float64")) - - -def moments_consistency_is_constant(x, is_constant, min_periods, count, mean, corr): - count_x = count(x) - mean_x = mean(x) - # check that correlation of a series with itself is either 1 or NaN - corr_x_x = corr(x, x) - - if is_constant: - exp = x.max() if isinstance(x, Series) else x.max().max() - - # check mean of constant series - expected = x * np.nan - expected[count_x >= max(min_periods, 1)] = exp - tm.assert_equal(mean_x, expected) - - # check correlation of constant series with itself is NaN - expected[:] = np.nan - tm.assert_equal(corr_x_x, expected) - - -def moments_consistency_var_debiasing_factors( - x, var_biased, var_unbiased, var_debiasing_factors -): - if var_unbiased and var_biased and var_debiasing_factors: - # check variance debiasing factors - var_unbiased_x = var_unbiased(x) - var_biased_x = var_biased(x) - var_debiasing_factors_x = var_debiasing_factors(x) - tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x) - - -def moments_consistency_var_data( - x, is_constant, min_periods, count, mean, var_unbiased, var_biased -): - count_x = count(x) - mean_x = mean(x) - for var in [var_biased, var_unbiased]: - var_x = var(x) - assert not (var_x < 0).any().any() - - if var is var_biased: - # check that biased var(x) == mean(x^2) - mean(x)^2 - mean_x2 = mean(x * x) - tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x)) - - if is_constant: - # check that variance of constant series is identically 0 - assert not (var_x > 0).any().any() - expected = x * np.nan - expected[count_x >= max(min_periods, 1)] = 0.0 - if var is var_unbiased: - expected[count_x < 2] = np.nan - tm.assert_equal(var_x, expected) - - -def moments_consistency_std_data(x, std_unbiased, var_unbiased, std_biased, var_biased): - for (std, var) in [(std_biased, var_biased), (std_unbiased, var_unbiased)]: - var_x = var(x) - std_x = std(x) - assert not (var_x < 0).any().any() - assert not (std_x < 0).any().any() - - # check that var(x) == std(x)^2 - tm.assert_equal(var_x, std_x * std_x) - - -def moments_consistency_cov_data(x, cov_unbiased, var_unbiased, cov_biased, var_biased): - for (cov, var) in [(cov_biased, var_biased), (cov_unbiased, var_unbiased)]: - var_x = var(x) - assert not (var_x < 0).any().any() - if cov: - cov_x_x = cov(x, x) - assert not (cov_x_x < 0).any().any() - - # check that var(x) == cov(x, x) - tm.assert_equal(var_x, cov_x_x) - - -def moments_consistency_series_data( - x, - corr, - mean, - std_biased, - std_unbiased, - cov_unbiased, - var_unbiased, - var_biased, - cov_biased, -): - if isinstance(x, Series): - y = x - mean_x = mean(x) - if not x.isna().equals(y.isna()): - # can only easily test two Series with similar - # structure - pass - - # check that cor(x, y) is symmetric - corr_x_y = corr(x, y) - corr_y_x = corr(y, x) - tm.assert_equal(corr_x_y, corr_y_x) - - for (std, var, cov) in [ - (std_biased, var_biased, cov_biased), - (std_unbiased, var_unbiased, cov_unbiased), - ]: - var_x = var(x) - std_x = std(x) - - if 
cov: - # check that cov(x, y) is symmetric - cov_x_y = cov(x, y) - cov_y_x = cov(y, x) - tm.assert_equal(cov_x_y, cov_y_x) - - # check that cov(x, y) == (var(x+y) - var(x) - - # var(y)) / 2 - var_x_plus_y = var(x + y) - var_y = var(y) - tm.assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y)) - - # check that corr(x, y) == cov(x, y) / (std(x) * - # std(y)) - std_y = std(y) - tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y)) - - if cov is cov_biased: - # check that biased cov(x, y) == mean(x*y) - - # mean(x)*mean(y) - mean_y = mean(y) - mean_x_times_y = mean(x * y) - tm.assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y)) diff --git a/pandas/tests/window/conftest.py b/pandas/tests/window/conftest.py index e1d7635b0a686..64e679336abb8 100644 --- a/pandas/tests/window/conftest.py +++ b/pandas/tests/window/conftest.py @@ -10,6 +10,7 @@ @pytest.fixture(params=[True, False]) def raw(request): + """raw keyword argument for rolling.apply""" return request.param @@ -274,43 +275,22 @@ def consistency_data(request): return request.param -def _create_arr(): - """Internal function to mock an array.""" +def _create_series(): + """Internal function to mock Series.""" arr = np.random.randn(100) locs = np.arange(20, 40) arr[locs] = np.NaN - return arr - - -def _create_rng(): - """Internal function to mock date range.""" - rng = bdate_range(datetime(2009, 1, 1), periods=100) - return rng - - -def _create_series(): - """Internal function to mock Series.""" - arr = _create_arr() - series = Series(arr.copy(), index=_create_rng()) + series = Series(arr, index=bdate_range(datetime(2009, 1, 1), periods=100)) return series def _create_frame(): """Internal function to mock DataFrame.""" - rng = _create_rng() - return DataFrame(np.random.randn(100, 10), index=rng, columns=np.arange(10)) - - -@pytest.fixture -def nan_locs(): - """Make a range as loc fixture.""" - return np.arange(20, 40) - - -@pytest.fixture -def arr(): - """Make an array as fixture.""" - return _create_arr() + return DataFrame( + np.random.randn(100, 10), + index=bdate_range(datetime(2009, 1, 1), periods=100), + columns=np.arange(10), + ) @pytest.fixture diff --git a/pandas/tests/window/moments/conftest.py b/pandas/tests/window/moments/conftest.py deleted file mode 100644 index ce4d04a9bcc1e..0000000000000 --- a/pandas/tests/window/moments/conftest.py +++ /dev/null @@ -1,19 +0,0 @@ -import numpy as np -import pytest - -from pandas import Series - - -@pytest.fixture -def binary_ew_data(): - A = Series(np.random.randn(50), index=np.arange(50)) - B = A[2:] + np.random.randn(48) - - A[:10] = np.NaN - B[-10:] = np.NaN - return A, B - - -@pytest.fixture(params=[0, 1, 2]) -def min_periods(request): - return request.param diff --git a/pandas/tests/window/moments/test_moments_consistency_ewm.py b/pandas/tests/window/moments/test_moments_consistency_ewm.py index 2718bdabee96a..aa3453680190b 100644 --- a/pandas/tests/window/moments/test_moments_consistency_ewm.py +++ b/pandas/tests/window/moments/test_moments_consistency_ewm.py @@ -3,15 +3,6 @@ from pandas import DataFrame, Series, concat import pandas._testing as tm -from pandas.tests.window.common import ( - moments_consistency_cov_data, - moments_consistency_is_constant, - moments_consistency_mock_mean, - moments_consistency_series_data, - moments_consistency_std_data, - moments_consistency_var_data, - moments_consistency_var_debiasing_factors, -) @pytest.mark.parametrize("func", ["cov", "corr"]) @@ -25,18 +16,28 @@ def test_ewm_pairwise_cov_corr(func, frame): @pytest.mark.parametrize("name", 
["cov", "corr"]) -def test_ewm_corr_cov(name, binary_ew_data): - A, B = binary_ew_data +def test_ewm_corr_cov(name): + A = Series(np.random.randn(50), index=np.arange(50)) + B = A[2:] + np.random.randn(48) + + A[:10] = np.NaN + B[-10:] = np.NaN result = getattr(A.ewm(com=20, min_periods=5), name)(B) assert np.isnan(result.values[:14]).all() assert not np.isnan(result.values[14:]).any() +@pytest.mark.parametrize("min_periods", [0, 1, 2]) @pytest.mark.parametrize("name", ["cov", "corr"]) -def test_ewm_corr_cov_min_periods(name, min_periods, binary_ew_data): +def test_ewm_corr_cov_min_periods(name, min_periods): # GH 7898 - A, B = binary_ew_data + A = Series(np.random.randn(50), index=np.arange(50)) + B = A[2:] + np.random.randn(48) + + A[:10] = np.NaN + B[-10:] = np.NaN + result = getattr(A.ewm(com=20, min_periods=min_periods), name)(B) # binary functions (ewmcov, ewmcorr) with bias=False require at # least two values @@ -56,248 +57,274 @@ def test_ewm_corr_cov_min_periods(name, min_periods, binary_ew_data): @pytest.mark.parametrize("name", ["cov", "corr"]) -def test_different_input_array_raise_exception(name, binary_ew_data): +def test_different_input_array_raise_exception(name): + A = Series(np.random.randn(50), index=np.arange(50)) + A[:10] = np.NaN - A, _ = binary_ew_data msg = "Input arrays must be of the same type!" # exception raised is Exception with pytest.raises(Exception, match=msg): getattr(A.ewm(com=20, min_periods=5), name)(np.random.randn(50)) -@pytest.mark.slow -@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) -@pytest.mark.parametrize("adjust", [True, False]) -@pytest.mark.parametrize("ignore_na", [True, False]) -def test_ewm_consistency(consistency_data, min_periods, adjust, ignore_na): - def _weights(s, com, adjust, ignore_na): - if isinstance(s, DataFrame): - if not len(s.columns): - return DataFrame(index=s.index, columns=s.columns) - w = concat( - [ - _weights(s.iloc[:, i], com=com, adjust=adjust, ignore_na=ignore_na) - for i, _ in enumerate(s.columns) - ], - axis=1, - ) - w.index = s.index - w.columns = s.columns - return w - - w = Series(np.nan, index=s.index) - alpha = 1.0 / (1.0 + com) - if ignore_na: - w[s.notna()] = _weights( - s[s.notna()], com=com, adjust=adjust, ignore_na=False - ) - elif adjust: - for i in range(len(s)): - if s.iat[i] == s.iat[i]: - w.iat[i] = pow(1.0 / (1.0 - alpha), i) - else: - sum_wts = 0.0 - prev_i = -1 - for i in range(len(s)): - if s.iat[i] == s.iat[i]: - if prev_i == -1: - w.iat[i] = 1.0 - else: - w.iat[i] = alpha * sum_wts / pow(1.0 - alpha, i - prev_i) - sum_wts += w.iat[i] - prev_i = i +def create_mock_weights(obj, com, adjust, ignore_na): + if isinstance(obj, DataFrame): + if not len(obj.columns): + return DataFrame(index=obj.index, columns=obj.columns) + w = concat( + [ + create_mock_series_weights( + obj.iloc[:, i], com=com, adjust=adjust, ignore_na=ignore_na + ) + for i, _ in enumerate(obj.columns) + ], + axis=1, + ) + w.index = obj.index + w.columns = obj.columns return w + else: + return create_mock_series_weights(obj, com, adjust, ignore_na) - def _variance_debiasing_factors(s, com, adjust, ignore_na): - weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na) - cum_sum = weights.cumsum().fillna(method="ffill") - cum_sum_sq = (weights * weights).cumsum().fillna(method="ffill") - numerator = cum_sum * cum_sum - denominator = numerator - cum_sum_sq - denominator[denominator <= 0.0] = np.nan - return numerator / denominator - - def _ewma(s, com, min_periods, adjust, ignore_na): - weights = _weights(s, com=com, 
adjust=adjust, ignore_na=ignore_na) - result = ( - s.multiply(weights).cumsum().divide(weights.cumsum()).fillna(method="ffill") - ) - result[ - s.expanding().count() < (max(min_periods, 1) if min_periods else 1) - ] = np.nan - return result +def create_mock_series_weights(s, com, adjust, ignore_na): + w = Series(np.nan, index=s.index) + alpha = 1.0 / (1.0 + com) + if adjust: + count = 0 + for i in range(len(s)): + if s.iat[i] == s.iat[i]: + w.iat[i] = pow(1.0 / (1.0 - alpha), count) + count += 1 + elif not ignore_na: + count += 1 + else: + sum_wts = 0.0 + prev_i = -1 + count = 0 + for i in range(len(s)): + if s.iat[i] == s.iat[i]: + if prev_i == -1: + w.iat[i] = 1.0 + else: + w.iat[i] = alpha * sum_wts / pow(1.0 - alpha, count - prev_i) + sum_wts += w.iat[i] + prev_i = count + count += 1 + elif not ignore_na: + count += 1 + return w + + +@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) +def test_ewm_consistency_mean(consistency_data, adjust, ignore_na, min_periods): x, is_constant, no_nans = consistency_data com = 3.0 - moments_consistency_mock_mean( - x=x, - mean=lambda x: x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).mean(), - mock_mean=lambda x: _ewma( - x, com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ), + + result = x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).mean() + weights = create_mock_weights(x, com=com, adjust=adjust, ignore_na=ignore_na) + expected = ( + x.multiply(weights).cumsum().divide(weights.cumsum()).fillna(method="ffill") ) + expected[ + x.expanding().count() < (max(min_periods, 1) if min_periods else 1) + ] = np.nan + tm.assert_equal(result, expected.astype("float64")) + + +@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) +def test_ewm_consistency_consistent(consistency_data, adjust, ignore_na, min_periods): + x, is_constant, no_nans = consistency_data + com = 3.0 - moments_consistency_is_constant( - x=x, - is_constant=is_constant, - min_periods=min_periods, - count=lambda x: x.expanding().count(), - mean=lambda x: x.ewm( + if is_constant: + count_x = x.expanding().count() + mean_x = x.ewm( com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).mean(), - corr=lambda x, y: x.ewm( + ).mean() + # check that correlation of a series with itself is either 1 or NaN + corr_x_x = x.ewm( com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).corr(y), - ) + ).corr(x) + exp = x.max() if isinstance(x, Series) else x.max().max() - moments_consistency_var_debiasing_factors( - x=x, - var_unbiased=lambda x: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).var(bias=False) - ), - var_biased=lambda x: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).var(bias=True) - ), - var_debiasing_factors=lambda x: ( - _variance_debiasing_factors(x, com=com, adjust=adjust, ignore_na=ignore_na) - ), - ) + # check mean of constant series + expected = x * np.nan + expected[count_x >= max(min_periods, 1)] = exp + tm.assert_equal(mean_x, expected) + + # check correlation of constant series with itself is NaN + expected[:] = np.nan + tm.assert_equal(corr_x_x, expected) @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) -@pytest.mark.parametrize("adjust", [True, False]) -@pytest.mark.parametrize("ignore_na", [True, False]) -def test_ewm_consistency_var(consistency_data, min_periods, adjust, ignore_na): +def test_ewm_consistency_var_debiasing_factors( + consistency_data, adjust, 
ignore_na, min_periods +): x, is_constant, no_nans = consistency_data com = 3.0 - moments_consistency_var_data( - x=x, - is_constant=is_constant, - min_periods=min_periods, - count=lambda x: x.expanding().count(), - mean=lambda x: x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).mean(), - var_unbiased=lambda x: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).var(bias=False) - ), - var_biased=lambda x: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).var(bias=True) - ), - ) + + # check variance debiasing factors + var_unbiased_x = x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).var(bias=False) + var_biased_x = x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).var(bias=True) + + weights = create_mock_weights(x, com=com, adjust=adjust, ignore_na=ignore_na) + cum_sum = weights.cumsum().fillna(method="ffill") + cum_sum_sq = (weights * weights).cumsum().fillna(method="ffill") + numerator = cum_sum * cum_sum + denominator = numerator - cum_sum_sq + denominator[denominator <= 0.0] = np.nan + var_debiasing_factors_x = numerator / denominator + + tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x) @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) -@pytest.mark.parametrize("adjust", [True, False]) -@pytest.mark.parametrize("ignore_na", [True, False]) -def test_ewm_consistency_std(consistency_data, min_periods, adjust, ignore_na): +@pytest.mark.parametrize("bias", [True, False]) +def test_moments_consistency_var( + consistency_data, adjust, ignore_na, min_periods, bias +): x, is_constant, no_nans = consistency_data com = 3.0 - moments_consistency_std_data( - x=x, - var_unbiased=lambda x: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).var(bias=False) - ), - std_unbiased=lambda x: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).std(bias=False) - ), - var_biased=lambda x: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).var(bias=True) - ), - std_biased=lambda x: x.ewm( + + mean_x = x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).mean() + var_x = x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).var(bias=bias) + assert not (var_x < 0).any().any() + + if bias: + # check that biased var(x) == mean(x^2) - mean(x)^2 + mean_x2 = ( + (x * x) + .ewm(com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na) + .mean() + ) + tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x)) + + +@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) +@pytest.mark.parametrize("bias", [True, False]) +def test_moments_consistency_var_constant( + consistency_data, adjust, ignore_na, min_periods, bias +): + x, is_constant, no_nans = consistency_data + com = 3.0 + if is_constant: + count_x = x.expanding(min_periods=min_periods).count() + var_x = x.ewm( com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).std(bias=True), - ) + ).var(bias=bias) + + # check that variance of constant series is identically 0 + assert not (var_x > 0).any().any() + expected = x * np.nan + expected[count_x >= max(min_periods, 1)] = 0.0 + if not bias: + expected[count_x < 2] = np.nan + tm.assert_equal(var_x, expected) @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) -@pytest.mark.parametrize("adjust", [True, False]) 
-@pytest.mark.parametrize("ignore_na", [True, False]) -def test_ewm_consistency_cov(consistency_data, min_periods, adjust, ignore_na): +@pytest.mark.parametrize("bias", [True, False]) +def test_ewm_consistency_std(consistency_data, adjust, ignore_na, min_periods, bias): x, is_constant, no_nans = consistency_data com = 3.0 - moments_consistency_cov_data( - x=x, - var_unbiased=lambda x: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).var(bias=False) - ), - cov_unbiased=lambda x, y: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).cov(y, bias=False) - ), - var_biased=lambda x: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).var(bias=True) - ), - cov_biased=lambda x, y: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).cov(y, bias=True) - ), - ) + var_x = x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).var(bias=bias) + std_x = x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).std(bias=bias) + assert not (var_x < 0).any().any() + assert not (std_x < 0).any().any() + + # check that var(x) == std(x)^2 + tm.assert_equal(var_x, std_x * std_x) -@pytest.mark.slow @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) -@pytest.mark.parametrize("adjust", [True, False]) -@pytest.mark.parametrize("ignore_na", [True, False]) -def test_ewm_consistency_series_data(consistency_data, min_periods, adjust, ignore_na): +@pytest.mark.parametrize("bias", [True, False]) +def test_ewm_consistency_cov(consistency_data, adjust, ignore_na, min_periods, bias): x, is_constant, no_nans = consistency_data com = 3.0 - moments_consistency_series_data( - x=x, - mean=lambda x: x.ewm( + var_x = x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).var(bias=bias) + assert not (var_x < 0).any().any() + + cov_x_x = x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).cov(x, bias=bias) + assert not (cov_x_x < 0).any().any() + + # check that var(x) == cov(x, x) + tm.assert_equal(var_x, cov_x_x) + + +@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) +@pytest.mark.parametrize("bias", [True, False]) +def test_ewm_consistency_series_cov_corr( + consistency_data, adjust, ignore_na, min_periods, bias +): + x, is_constant, no_nans = consistency_data + com = 3.0 + + if isinstance(x, Series): + var_x_plus_y = ( + (x + x) + .ewm(com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na) + .var(bias=bias) + ) + var_x = x.ewm( com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).mean(), - corr=lambda x, y: x.ewm( + ).var(bias=bias) + var_y = x.ewm( com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).corr(y), - var_unbiased=lambda x: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).var(bias=False) - ), - std_unbiased=lambda x: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).std(bias=False) - ), - cov_unbiased=lambda x, y: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).cov(y, bias=False) - ), - var_biased=lambda x: ( - x.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).var(bias=True) - ), - std_biased=lambda x: x.ewm( + ).var(bias=bias) + cov_x_y = x.ewm( com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).std(bias=True), - cov_biased=lambda x, y: ( - 
x.ewm( + ).cov(x, bias=bias) + # check that cov(x, y) == (var(x+y) - var(x) - + # var(y)) / 2 + tm.assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y)) + + # check that corr(x, y) == cov(x, y) / (std(x) * + # std(y)) + corr_x_y = x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).corr(x, bias=bias) + std_x = x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).std(bias=bias) + std_y = x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).std(bias=bias) + tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y)) + + if bias: + # check that biased cov(x, y) == mean(x*y) - + # mean(x)*mean(y) + mean_x = x.ewm( com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).cov(y, bias=True) - ), - ) + ).mean() + mean_y = x.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).mean() + mean_x_times_y = ( + (x * x) + .ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ) + .mean() + ) + tm.assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y)) diff --git a/pandas/tests/window/moments/test_moments_ewm.py b/pandas/tests/window/moments/test_moments_ewm.py index def6d7289fec2..eceba7f143ab9 100644 --- a/pandas/tests/window/moments/test_moments_ewm.py +++ b/pandas/tests/window/moments/test_moments_ewm.py @@ -226,8 +226,12 @@ def test_ewma_halflife_arg(series): series.ewm() -def test_ewm_alpha(arr): +def test_ewm_alpha(): # GH 10789 + arr = np.random.randn(100) + locs = np.arange(20, 40) + arr[locs] = np.NaN + s = Series(arr) a = s.ewm(alpha=0.61722699889169674).mean() b = s.ewm(com=0.62014947789973052).mean() @@ -254,8 +258,12 @@ def test_ewm_alpha_arg(series): s.ewm(halflife=10.0, alpha=0.5) -def test_ewm_domain_checks(arr): +def test_ewm_domain_checks(): # GH 12492 + arr = np.random.randn(100) + locs = np.arange(20, 40) + arr[locs] = np.NaN + s = Series(arr) msg = "comass must satisfy: comass >= 0" with pytest.raises(ValueError, match=msg):
Same changes as https://github.com/pandas-dev/pandas/pull/37944 and https://github.com/pandas-dev/pandas/pull/37946. Additionally, this allows removal of `moments/conftest.py` and `tests/window/common.py`.
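The refactored tests replace lambda plumbing with explicit identities; as a minimal sketch of the core one, the adjusted EWM mean written out as an explicit weighted cumulative average (the sample series is arbitrary and NaN handling is omitted):

```python
import numpy as np
import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 4.0])
com = 3.0
alpha = 1.0 / (1.0 + com)

# With adjust=True, the EWM mean equals a cumulative weighted average
# with weights (1 / (1 - alpha)) ** i, which is what the mock-weight
# helpers in these tests construct (plus NaN and min_periods handling).
weights = pd.Series((1.0 / (1.0 - alpha)) ** np.arange(len(s)))
expected = (s * weights).cumsum() / weights.cumsum()
result = s.ewm(com=com, adjust=True).mean()
pd.testing.assert_series_equal(result, expected)
```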
https://api.github.com/repos/pandas-dev/pandas/pulls/37951
2020-11-19T04:58:41Z
2020-11-19T15:40:39Z
2020-11-19T15:40:38Z
2020-11-19T17:56:57Z
ENH: 2D compat for DTA tz_localize, to_period
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 02214ff51b02a..b6938931e86af 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -1,5 +1,6 @@ from __future__ import annotations +from functools import wraps from typing import Any, Optional, Sequence, Type, TypeVar, Union import numpy as np @@ -27,6 +28,26 @@ ) +def ravel_compat(meth): + """ + Decorator to ravel a 2D array before passing it to a cython operation, + then reshape the result to our own shape. + """ + + @wraps(meth) + def method(self, *args, **kwargs): + if self.ndim == 1: + return meth(self, *args, **kwargs) + + flags = self._ndarray.flags + flat = self.ravel("K") + result = meth(flat, *args, **kwargs) + order = "F" if flags.f_contiguous else "C" + return result.reshape(self.shape, order=order) + + return method + + class NDArrayBackedExtensionArray(ExtensionArray): """ ExtensionArray that is backed by a single NumPy ndarray. diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index be9864731842d..ee1323b71f146 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -64,7 +64,7 @@ from pandas.core import nanops, ops from pandas.core.algorithms import checked_add_with_arr, isin, unique1d, value_counts from pandas.core.arraylike import OpsMixin -from pandas.core.arrays._mixins import NDArrayBackedExtensionArray +from pandas.core.arrays._mixins import NDArrayBackedExtensionArray, ravel_compat import pandas.core.common as com from pandas.core.construction import array, extract_array from pandas.core.indexers import check_array_indexer, check_setitem_lengths @@ -679,6 +679,9 @@ def value_counts(self, dropna: bool = False): ------- Series """ + if self.ndim != 1: + raise NotImplementedError + from pandas import Index, Series if dropna: @@ -694,6 +697,7 @@ def value_counts(self, dropna: bool = False): ) return Series(result._values, index=index, name=result.name) + @ravel_compat def map(self, mapper): # TODO(GH-23179): Add ExtensionArray.map # Need to figure out if we want ExtensionArray.map first. 
@@ -820,6 +824,9 @@ def freq(self, value): value = to_offset(value) self._validate_frequency(self, value) + if self.ndim > 1: + raise ValueError("Cannot set freq with ndim > 1") + self._freq = value @property @@ -918,7 +925,7 @@ def _is_monotonic_decreasing(self) -> bool: @property def _is_unique(self) -> bool: - return len(unique1d(self.asi8)) == len(self) + return len(unique1d(self.asi8.ravel("K"))) == self.size # ------------------------------------------------------------------ # Arithmetic Methods diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 5fdfa62c393c4..b072ac3cec52e 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -612,14 +612,15 @@ def astype(self, dtype, copy=True): # ----------------------------------------------------------------- # Rendering Methods + @dtl.ravel_compat def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): from pandas.io.formats.format import get_format_datetime64_from_values fmt = get_format_datetime64_from_values(self, date_format) return tslib.format_array_from_datetime( - self.asi8.ravel(), tz=self.tz, format=fmt, na_rep=na_rep - ).reshape(self.shape) + self.asi8, tz=self.tz, format=fmt, na_rep=na_rep + ) # ----------------------------------------------------------------- # Comparison Methods @@ -819,6 +820,7 @@ def tz_convert(self, tz): dtype = tz_to_dtype(tz) return self._simple_new(self.asi8, dtype=dtype, freq=self.freq) + @dtl.ravel_compat def tz_localize(self, tz, ambiguous="raise", nonexistent="raise"): """ Localize tz-naive Datetime Array/Index to tz-aware @@ -1051,6 +1053,7 @@ def normalize(self): new_values = normalize_i8_timestamps(self.asi8, self.tz) return type(self)(new_values)._with_freq("infer").tz_localize(self.tz) + @dtl.ravel_compat def to_period(self, freq=None): """ Cast to PeriodArray/Index at a particular frequency. 
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 257baf20ce911..40dd475e6b6f2 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -562,6 +562,7 @@ def _formatter(self, boxed: bool = False): return str return "'{}'".format + @dtl.ravel_compat def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): """ actually format my specific types diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 93c9567380f7f..fe4eaa4b4bf19 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -400,11 +400,12 @@ def _formatter(self, boxed=False): return get_format_timedelta64(self, box=True) + @dtl.ravel_compat def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): from pandas.io.formats.format import get_format_timedelta64 formatter = get_format_timedelta64(self._data, na_rep) - return np.array([formatter(x) for x in self._data.ravel()]).reshape(self.shape) + return np.array([formatter(x) for x in self._data]) # ---------------------------------------------------------------- # Arithmetic Methods diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index c489aa5867632..7c093ebe00959 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -720,6 +720,15 @@ def test_to_period(self, datetime_index, freqstr): # an EA-specific tm.assert_ function tm.assert_index_equal(pd.Index(result), pd.Index(expected)) + def test_to_period_2d(self, arr1d): + arr2d = arr1d.reshape(1, -1) + + warn = None if arr1d.tz is None else UserWarning + with tm.assert_produces_warning(warn): + result = arr2d.to_period("D") + expected = arr1d.to_period("D").reshape(1, -1) + tm.assert_period_array_equal(result, expected) + @pytest.mark.parametrize("propname", pd.DatetimeIndex._bool_ops) def test_bool_properties(self, arr1d, propname): # in this case _bool_ops is just `is_leap_year` diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index 4addc0536848f..c8db0157ba219 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -449,6 +449,17 @@ def test_shift_requires_tzmatch(self): with pytest.raises(ValueError, match=msg): dta.shift(1, fill_value=fill_value) + def test_tz_localize_t2d(self): + dti = pd.date_range("1994-05-12", periods=12, tz="US/Pacific") + dta = dti._data.reshape(3, 4) + result = dta.tz_localize(None) + + expected = dta.ravel().tz_localize(None).reshape(dta.shape) + tm.assert_datetime_array_equal(result, expected) + + roundtrip = expected.tz_localize("US/Pacific") + tm.assert_datetime_array_equal(roundtrip, dta) + class TestSequenceToDT64NS: def test_tz_dtype_mismatch_raises(self):
- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
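A usage sketch lifted from the tests in this diff; note `DatetimeIndex._data` is internal API, used here only to get a 2D `DatetimeArray`, and this requires a build that includes the change:

```python
import pandas as pd
import pandas._testing as tm

dti = pd.date_range("1994-05-12", periods=12, tz="US/Pacific")
dta = dti._data.reshape(3, 4)  # 2D DatetimeArray

# tz_localize now operates elementwise on the 2D array via ravel_compat
result = dta.tz_localize(None)
expected = dta.ravel().tz_localize(None).reshape(dta.shape)
tm.assert_datetime_array_equal(result, expected)
```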
https://api.github.com/repos/pandas-dev/pandas/pulls/37950
2020-11-19T01:43:19Z
2020-12-17T17:46:10Z
2020-12-17T17:46:10Z
2020-12-17T17:47:48Z
DOC: Change warning for sort behavior in concat
diff --git a/doc/source/user_guide/merging.rst b/doc/source/user_guide/merging.rst index f1a28dc30dd68..d8998a9a0a6e1 100644 --- a/doc/source/user_guide/merging.rst +++ b/doc/source/user_guide/merging.rst @@ -194,7 +194,7 @@ behavior: }, index=[2, 3, 6, 7], ) - result = pd.concat([df1, df4], axis=1, sort=False) + result = pd.concat([df1, df4], axis=1) .. ipython:: python @@ -204,13 +204,6 @@ behavior: p.plot([df1, df4], result, labels=["df1", "df4"], vertical=False); plt.close("all"); -.. warning:: - - The default behavior with ``join='outer'`` is to sort the other axis - (columns in this case). In a future version of pandas, the default will - be to not sort. We specified ``sort=False`` to opt in to the new - behavior now. - Here is the same thing with ``join='inner'``: .. ipython:: python
We should either add a ``versionchanged`` attribute for when this behavior changed, or remove the warning completely. Additionally I removed the ``sort=False`` argument from the function call, because it is no longer necessary.
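For reference, a small sketch of the current default behaviour (values arbitrary); if I recall correctly the default has been ``sort=False`` since pandas 1.0, which is why both the warning and the explicit argument are obsolete:

```python
import pandas as pd

df1 = pd.DataFrame({"A": [1, 2]}, index=[3, 1])
df4 = pd.DataFrame({"B": [3, 4]}, index=[1, 2])

# join='outer' no longer sorts the non-concatenation axis by default
result = pd.concat([df1, df4], axis=1)
print(result.index.tolist())  # [3, 1, 2]: order of appearance, not sorted
```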
https://api.github.com/repos/pandas-dev/pandas/pulls/37948
2020-11-18T23:08:14Z
2020-11-19T18:11:18Z
2020-11-19T18:11:18Z
2020-11-19T19:30:23Z
CLN: test_moments_rolling_consistency.py
diff --git a/pandas/tests/window/moments/conftest.py b/pandas/tests/window/moments/conftest.py index 2fab7f5c91c09..ce4d04a9bcc1e 100644 --- a/pandas/tests/window/moments/conftest.py +++ b/pandas/tests/window/moments/conftest.py @@ -17,61 +17,3 @@ def binary_ew_data(): @pytest.fixture(params=[0, 1, 2]) def min_periods(request): return request.param - - -base_functions_list = [ - (lambda v: Series(v).count(), None, "count"), - (lambda v: Series(v).max(), None, "max"), - (lambda v: Series(v).min(), None, "min"), - (lambda v: Series(v).sum(), None, "sum"), - (lambda v: Series(v).mean(), None, "mean"), - (lambda v: Series(v).std(), 1, "std"), - (lambda v: Series(v).cov(Series(v)), None, "cov"), - (lambda v: Series(v).corr(Series(v)), None, "corr"), - (lambda v: Series(v).var(), 1, "var"), - # restore once GH 8086 is fixed - # lambda v: Series(v).skew(), 3, 'skew'), - # (lambda v: Series(v).kurt(), 4, 'kurt'), - # restore once GH 8084 is fixed - # lambda v: Series(v).quantile(0.3), None, 'quantile'), - (lambda v: Series(v).median(), None, "median"), - (np.nanmax, 1, "max"), - (np.nanmin, 1, "min"), - (np.nansum, 1, "sum"), - (np.nanmean, 1, "mean"), - (lambda v: np.nanstd(v, ddof=1), 1, "std"), - (lambda v: np.nanvar(v, ddof=1), 1, "var"), - (np.nanmedian, 1, "median"), -] - -no_nan_functions_list = [ - (np.max, None, "max"), - (np.min, None, "min"), - (np.sum, None, "sum"), - (np.mean, None, "mean"), - (lambda v: np.std(v, ddof=1), 1, "std"), - (lambda v: np.var(v, ddof=1), 1, "var"), - (np.median, None, "median"), -] - - -@pytest.fixture(scope="session") -def base_functions(): - """Fixture for base functions. - - Returns - ------- - List of tuples: (applied function, require_min_periods, name of applied function) - """ - return base_functions_list - - -@pytest.fixture(scope="session") -def no_nan_functions(): - """Fixture for no nan functions. 
- - Returns - ------- - List of tuples: (applied function, require_min_periods, name of applied function) - """ - return no_nan_functions_list diff --git a/pandas/tests/window/moments/test_moments_consistency_rolling.py b/pandas/tests/window/moments/test_moments_consistency_rolling.py index b7b05c1a6e30d..ceabf71747cb8 100644 --- a/pandas/tests/window/moments/test_moments_consistency_rolling.py +++ b/pandas/tests/window/moments/test_moments_consistency_rolling.py @@ -1,5 +1,4 @@ from datetime import datetime -import warnings import numpy as np import pytest @@ -10,15 +9,6 @@ from pandas import DataFrame, DatetimeIndex, Index, Series import pandas._testing as tm from pandas.core.window.common import flex_binary_moment -from pandas.tests.window.common import ( - moments_consistency_cov_data, - moments_consistency_is_constant, - moments_consistency_mock_mean, - moments_consistency_series_data, - moments_consistency_std_data, - moments_consistency_var_data, - moments_consistency_var_debiasing_factors, -) def _rolling_consistency_cases(): @@ -87,56 +77,47 @@ def test_flex_binary_frame(method, frame): tm.assert_frame_equal(res3, exp) -@pytest.mark.slow @pytest.mark.parametrize( "window,min_periods,center", list(_rolling_consistency_cases()) ) -def test_rolling_apply_consistency( - consistency_data, base_functions, no_nan_functions, window, min_periods, center +@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum]) +def test_rolling_apply_consistency_sum_nans( + consistency_data, window, min_periods, center, f ): x, is_constant, no_nans = consistency_data - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning - ) - # test consistency between rolling_xyz() and either (a) - # rolling_apply of Series.xyz(), or (b) rolling_apply of - # np.nanxyz() - functions = base_functions - - # GH 8269 - if no_nans: - functions = no_nan_functions + base_functions - for (f, require_min_periods, name) in functions: - rolling_f = getattr( - x.rolling(window=window, center=center, min_periods=min_periods), name - ) + if f is np.nansum and min_periods == 0: + pass + else: + rolling_f_result = x.rolling( + window=window, min_periods=min_periods, center=center + ).sum() + rolling_apply_f_result = x.rolling( + window=window, min_periods=min_periods, center=center + ).apply(func=f, raw=True) + tm.assert_equal(rolling_f_result, rolling_apply_f_result) - if ( - require_min_periods - and (min_periods is not None) - and (min_periods < require_min_periods) - ): - continue - if name == "count": - rolling_f_result = rolling_f() - rolling_apply_f_result = x.rolling( - window=window, min_periods=min_periods, center=center - ).apply(func=f, raw=True) - else: - if name in ["cov", "corr"]: - rolling_f_result = rolling_f(pairwise=False) - else: - rolling_f_result = rolling_f() - rolling_apply_f_result = x.rolling( - window=window, min_periods=min_periods, center=center - ).apply(func=f, raw=True) - - # GH 9422 - if name in ["sum", "prod"]: - tm.assert_equal(rolling_f_result, rolling_apply_f_result) +@pytest.mark.parametrize( + "window,min_periods,center", list(_rolling_consistency_cases()) +) +@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum, np.sum]) +def test_rolling_apply_consistency_sum_no_nans( + consistency_data, window, min_periods, center, f +): + x, is_constant, no_nans = consistency_data + + if no_nans: + if f is np.nansum and min_periods == 0: + pass + else: + rolling_f_result = x.rolling( + window=window, 
min_periods=min_periods, center=center + ).sum() + rolling_apply_f_result = x.rolling( + window=window, min_periods=min_periods, center=center + ).apply(func=f, raw=True) + tm.assert_equal(rolling_f_result, rolling_apply_f_result) @pytest.mark.parametrize("window", range(7)) @@ -174,14 +155,9 @@ def test_corr_sanity(): res = df[0].rolling(5, center=True).corr(df[1]) assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res) - # and some fuzzing - for _ in range(10): - df = DataFrame(np.random.rand(30, 2)) - res = df[0].rolling(5, center=True).corr(df[1]) - try: - assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res) - except AssertionError: - print(res) + df = DataFrame(np.random.rand(30, 2)) + res = df[0].rolling(5, center=True).corr(df[1]) + assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res) def test_rolling_cov_diff_length(): @@ -227,10 +203,12 @@ def test_rolling_corr_diff_length(): lambda x: x.rolling(window=10, min_periods=5).median(), lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False), lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True), - lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(), + pytest.param( + lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(), + marks=td.skip_if_no_scipy, + ), ], ) -@td.skip_if_no_scipy def test_rolling_functions_window_non_shrinkage(f): # GH 7764 s = Series(range(4)) @@ -245,7 +223,14 @@ def test_rolling_functions_window_non_shrinkage(f): tm.assert_frame_equal(df_result, df_expected) -def test_rolling_functions_window_non_shrinkage_binary(): +@pytest.mark.parametrize( + "f", + [ + lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)), + lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)), + ], +) +def test_rolling_functions_window_non_shrinkage_binary(f): # corr/cov return a MI DataFrame df = DataFrame( @@ -258,13 +243,8 @@ def test_rolling_functions_window_non_shrinkage_binary(): index=pd.MultiIndex.from_product([df.index, df.columns], names=["bar", "foo"]), dtype="float64", ) - functions = [ - lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)), - lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)), - ] - for f in functions: - df_result = f(df) - tm.assert_frame_equal(df_result, df_expected) + df_result = f(df) + tm.assert_frame_equal(df_result, df_expected) def test_rolling_skew_edge_cases(): @@ -427,34 +407,26 @@ def test_rolling_median_memory_error(): Series(np.random.randn(n)).rolling(window=2, center=False).median() -def test_rolling_min_max_numeric_types(): - +@pytest.mark.parametrize( + "data_type", + [np.dtype(f"f{width}") for width in [4, 8]] + + [np.dtype(f"{sign}{width}") for width in [1, 2, 4, 8] for sign in "ui"], +) +def test_rolling_min_max_numeric_types(data_type): # GH12373 - types_test = [np.dtype(f"f{width}") for width in [4, 8]] - types_test.extend( - [np.dtype(f"{sign}{width}") for width in [1, 2, 4, 8] for sign in "ui"] - ) - for data_type in types_test: - # Just testing that these don't throw exceptions and that - # the return type is float64. Other tests will cover quantitative - # correctness - result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).max() - assert result.dtypes[0] == np.dtype("f8") - result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).min() - assert result.dtypes[0] == np.dtype("f8") + # Just testing that these don't throw exceptions and that + # the return type is float64. 
Other tests will cover quantitative + # correctness + result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).max() + assert result.dtypes[0] == np.dtype("f8") + result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).min() + assert result.dtypes[0] == np.dtype("f8") -def test_moment_functions_zero_length(): - # GH 8056 - s = Series(dtype=np.float64) - s_expected = s - df1 = DataFrame() - df1_expected = df1 - df2 = DataFrame(columns=["a"]) - df2["a"] = df2["a"].astype("float64") - df2_expected = df2 - functions = [ +@pytest.mark.parametrize( + "f", + [ lambda x: x.rolling(window=10, min_periods=0).count(), lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False), lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False), @@ -470,25 +442,40 @@ def test_moment_functions_zero_length(): lambda x: x.rolling(window=10, min_periods=5).median(), lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False), lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True), - lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(), - ] - for f in functions: - try: - s_result = f(s) - tm.assert_series_equal(s_result, s_expected) + pytest.param( + lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(), + marks=td.skip_if_no_scipy, + ), + ], +) +def test_moment_functions_zero_length(f): + # GH 8056 + s = Series(dtype=np.float64) + s_expected = s + df1 = DataFrame() + df1_expected = df1 + df2 = DataFrame(columns=["a"]) + df2["a"] = df2["a"].astype("float64") + df2_expected = df2 - df1_result = f(df1) - tm.assert_frame_equal(df1_result, df1_expected) + s_result = f(s) + tm.assert_series_equal(s_result, s_expected) - df2_result = f(df2) - tm.assert_frame_equal(df2_result, df2_expected) - except (ImportError): + df1_result = f(df1) + tm.assert_frame_equal(df1_result, df1_expected) - # scipy needed for rolling_window - continue + df2_result = f(df2) + tm.assert_frame_equal(df2_result, df2_expected) -def test_moment_functions_zero_length_pairwise(): +@pytest.mark.parametrize( + "f", + [ + lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)), + lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)), + ], +) +def test_moment_functions_zero_length_pairwise(f): df1 = DataFrame() df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar")) @@ -505,194 +492,228 @@ def test_moment_functions_zero_length_pairwise(): dtype="float64", ) - functions = [ - lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)), - lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)), - ] - - for f in functions: - df1_result = f(df1) - tm.assert_frame_equal(df1_result, df1_expected) + df1_result = f(df1) + tm.assert_frame_equal(df1_result, df1_expected) - df2_result = f(df2) - tm.assert_frame_equal(df2_result, df2_expected) + df2_result = f(df2) + tm.assert_frame_equal(df2_result, df2_expected) -@pytest.mark.slow @pytest.mark.parametrize( "window,min_periods,center", list(_rolling_consistency_cases()) ) -def test_rolling_consistency_var(consistency_data, window, min_periods, center): +@pytest.mark.parametrize("ddof", [0, 1]) +def test_moments_consistency_var(consistency_data, window, min_periods, center, ddof): x, is_constant, no_nans = consistency_data - moments_consistency_var_data( - x=x, - is_constant=is_constant, - min_periods=min_periods, - count=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center).count() - ), - mean=lambda x: ( - 
x.rolling(window=window, min_periods=min_periods, center=center).mean() - ), - var_unbiased=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center).var() - ), - var_biased=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center).var(ddof=0) - ), + + mean_x = x.rolling(window=window, min_periods=min_periods, center=center).mean() + var_x = x.rolling(window=window, min_periods=min_periods, center=center).var( + ddof=ddof ) + assert not (var_x < 0).any().any() + + if ddof == 0: + # check that biased var(x) == mean(x^2) - mean(x)^2 + mean_x2 = ( + (x * x) + .rolling(window=window, min_periods=min_periods, center=center) + .mean() + ) + tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x)) -@pytest.mark.slow @pytest.mark.parametrize( "window,min_periods,center", list(_rolling_consistency_cases()) ) -def test_rolling_consistency_std(consistency_data, window, min_periods, center): +@pytest.mark.parametrize("ddof", [0, 1]) +def test_moments_consistency_var_constant( + consistency_data, window, min_periods, center, ddof +): x, is_constant, no_nans = consistency_data - moments_consistency_std_data( - x=x, - var_unbiased=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center).var() - ), - std_unbiased=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center).std() - ), - var_biased=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center).var(ddof=0) - ), - std_biased=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center).std(ddof=0) - ), - ) + + if is_constant: + count_x = x.rolling( + window=window, min_periods=min_periods, center=center + ).count() + var_x = x.rolling(window=window, min_periods=min_periods, center=center).var( + ddof=ddof + ) + + # check that variance of constant series is identically 0 + assert not (var_x > 0).any().any() + expected = x * np.nan + expected[count_x >= max(min_periods, 1)] = 0.0 + if ddof == 1: + expected[count_x < 2] = np.nan + tm.assert_equal(var_x, expected) -@pytest.mark.slow @pytest.mark.parametrize( "window,min_periods,center", list(_rolling_consistency_cases()) ) -def test_rolling_consistency_cov(consistency_data, window, min_periods, center): +@pytest.mark.parametrize("ddof", [0, 1]) +def test_rolling_consistency_std(consistency_data, window, min_periods, center, ddof): x, is_constant, no_nans = consistency_data - moments_consistency_cov_data( - x=x, - var_unbiased=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center).var() - ), - cov_unbiased=lambda x, y: ( - x.rolling(window=window, min_periods=min_periods, center=center).cov(y) - ), - var_biased=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center).var(ddof=0) - ), - cov_biased=lambda x, y: ( - x.rolling(window=window, min_periods=min_periods, center=center).cov( - y, ddof=0 - ) - ), + + var_x = x.rolling(window=window, min_periods=min_periods, center=center).var( + ddof=ddof + ) + std_x = x.rolling(window=window, min_periods=min_periods, center=center).std( + ddof=ddof ) + assert not (var_x < 0).any().any() + assert not (std_x < 0).any().any() + + # check that var(x) == std(x)^2 + tm.assert_equal(var_x, std_x * std_x) -@pytest.mark.slow @pytest.mark.parametrize( "window,min_periods,center", list(_rolling_consistency_cases()) ) -def test_rolling_consistency_series(consistency_data, window, min_periods, center): +@pytest.mark.parametrize("ddof", [0, 1]) +def test_rolling_consistency_cov(consistency_data, window, min_periods, 
center, ddof): x, is_constant, no_nans = consistency_data - moments_consistency_series_data( - x=x, - mean=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center).mean() - ), - corr=lambda x, y: ( - x.rolling(window=window, min_periods=min_periods, center=center).corr(y) - ), - var_unbiased=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center).var() - ), - std_unbiased=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center).std() - ), - cov_unbiased=lambda x, y: ( - x.rolling(window=window, min_periods=min_periods, center=center).cov(y) - ), - var_biased=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center).var(ddof=0) - ), - std_biased=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center).std(ddof=0) - ), - cov_biased=lambda x, y: ( - x.rolling(window=window, min_periods=min_periods, center=center).cov( - y, ddof=0 - ) - ), + var_x = x.rolling(window=window, min_periods=min_periods, center=center).var( + ddof=ddof + ) + assert not (var_x < 0).any().any() + + cov_x_x = x.rolling(window=window, min_periods=min_periods, center=center).cov( + x, ddof=ddof ) + assert not (cov_x_x < 0).any().any() + + # check that var(x) == cov(x, x) + tm.assert_equal(var_x, cov_x_x) -@pytest.mark.slow @pytest.mark.parametrize( "window,min_periods,center", list(_rolling_consistency_cases()) ) -def test_rolling_consistency(consistency_data, window, min_periods, center): +@pytest.mark.parametrize("ddof", [0, 1]) +def test_rolling_consistency_series_cov_corr( + consistency_data, window, min_periods, center, ddof +): x, is_constant, no_nans = consistency_data - # suppress warnings about empty slices, as we are deliberately testing - # with empty/0-length Series/DataFrames - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning - ) - # test consistency between different rolling_* moments - moments_consistency_mock_mean( - x=x, - mean=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center).mean() - ), - mock_mean=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center) - .sum() - .divide( - x.rolling( - window=window, min_periods=min_periods, center=center - ).count() - ) - ), + if isinstance(x, Series): + var_x_plus_y = ( + (x + x) + .rolling(window=window, min_periods=min_periods, center=center) + .var(ddof=ddof) + ) + var_x = x.rolling(window=window, min_periods=min_periods, center=center).var( + ddof=ddof + ) + var_y = x.rolling(window=window, min_periods=min_periods, center=center).var( + ddof=ddof ) + cov_x_y = x.rolling(window=window, min_periods=min_periods, center=center).cov( + x, ddof=ddof + ) + # check that cov(x, y) == (var(x+y) - var(x) - + # var(y)) / 2 + tm.assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y)) + + # check that corr(x, y) == cov(x, y) / (std(x) * + # std(y)) + corr_x_y = x.rolling( + window=window, min_periods=min_periods, center=center + ).corr(x) + std_x = x.rolling(window=window, min_periods=min_periods, center=center).std( + ddof=ddof + ) + std_y = x.rolling(window=window, min_periods=min_periods, center=center).std( + ddof=ddof + ) + tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y)) + + if ddof == 0: + # check that biased cov(x, y) == mean(x*y) - + # mean(x)*mean(y) + mean_x = x.rolling( + window=window, min_periods=min_periods, center=center + ).mean() + mean_y = x.rolling( + window=window, min_periods=min_periods, 
center=center + ).mean() + mean_x_times_y = ( + (x * x) + .rolling(window=window, min_periods=min_periods, center=center) + .mean() + ) + tm.assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y)) - moments_consistency_is_constant( - x=x, - is_constant=is_constant, - min_periods=min_periods, - count=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center).count() - ), - mean=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center).mean() - ), - corr=lambda x, y: ( - x.rolling(window=window, min_periods=min_periods, center=center).corr(y) - ), + +@pytest.mark.parametrize( + "window,min_periods,center", list(_rolling_consistency_cases()) +) +def test_rolling_consistency_mean(consistency_data, window, min_periods, center): + x, is_constant, no_nans = consistency_data + + result = x.rolling(window=window, min_periods=min_periods, center=center).mean() + expected = ( + x.rolling(window=window, min_periods=min_periods, center=center) + .sum() + .divide( + x.rolling(window=window, min_periods=min_periods, center=center).count() ) + ) + tm.assert_equal(result, expected.astype("float64")) - moments_consistency_var_debiasing_factors( - x=x, - var_unbiased=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center).var() - ), - var_biased=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center).var( - ddof=0 - ) - ), - var_debiasing_factors=lambda x: ( - x.rolling(window=window, min_periods=min_periods, center=center) - .count() - .divide( - ( - x.rolling( - window=window, min_periods=min_periods, center=center - ).count() - - 1.0 - ).replace(0.0, np.nan) - ) - ), + +@pytest.mark.parametrize( + "window,min_periods,center", list(_rolling_consistency_cases()) +) +def test_rolling_consistency_constant(consistency_data, window, min_periods, center): + x, is_constant, no_nans = consistency_data + + if is_constant: + count_x = x.rolling( + window=window, min_periods=min_periods, center=center + ).count() + mean_x = x.rolling(window=window, min_periods=min_periods, center=center).mean() + # check that correlation of a series with itself is either 1 or NaN + corr_x_x = x.rolling( + window=window, min_periods=min_periods, center=center + ).corr(x) + + exp = x.max() if isinstance(x, Series) else x.max().max() + + # check mean of constant series + expected = x * np.nan + expected[count_x >= max(min_periods, 1)] = exp + tm.assert_equal(mean_x, expected) + + # check correlation of constant series with itself is NaN + expected[:] = np.nan + tm.assert_equal(corr_x_x, expected) + + +@pytest.mark.parametrize( + "window,min_periods,center", list(_rolling_consistency_cases()) +) +def test_rolling_consistency_var_debiasing_factors( + consistency_data, window, min_periods, center +): + x, is_constant, no_nans = consistency_data + + # check variance debiasing factors + var_unbiased_x = x.rolling( + window=window, min_periods=min_periods, center=center + ).var() + var_biased_x = x.rolling(window=window, min_periods=min_periods, center=center).var( + ddof=0 + ) + var_debiasing_factors_x = ( + x.rolling(window=window, min_periods=min_periods, center=center) + .count() + .divide( + ( + x.rolling(window=window, min_periods=min_periods, center=center).count() + - 1.0 + ).replace(0.0, np.nan) ) + ) + tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)
Same changes as https://github.com/pandas-dev/pandas/pull/37944
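As with the expanding counterpart, the refactored tests assert simple algebraic identities directly; a minimal sketch of one of them (window parameters arbitrary):

```python
import numpy as np
import pandas as pd
import pandas._testing as tm

x = pd.Series(np.random.randn(50))
roll = x.rolling(window=10, min_periods=5)

# check that var(x) == std(x) ** 2
tm.assert_series_equal(roll.var(ddof=1), roll.std(ddof=1) ** 2)
```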
https://api.github.com/repos/pandas-dev/pandas/pulls/37946
2020-11-18T21:26:53Z
2020-11-18T23:48:16Z
2020-11-18T23:48:16Z
2020-11-19T01:10:48Z
PERF: use np.putmask instead of ndarray.__setitem__
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 477a06ebd3cef..1f348ca0b0ece 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1028,10 +1028,15 @@ def _putmask_simple(self, mask: np.ndarray, value: Any): if lib.is_scalar(value) and isinstance(values, np.ndarray): value = convert_scalar_for_putitemlike(value, values.dtype) - if is_list_like(value) and len(value) == len(values): - values[mask] = value[mask] + if self.is_extension or self.is_object: + # GH#19266 using np.putmask gives unexpected results with listlike value + if is_list_like(value) and len(value) == len(values): + values[mask] = value[mask] + else: + values[mask] = value else: - values[mask] = value + # GH#37833 np.putmask is more performant than __setitem__ + np.putmask(values, mask, value) def putmask( self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False
- [x] closes #37833
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
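A minimal sketch of the swap, plus the list-like caveat noted in the diff (array contents arbitrary):

```python
import numpy as np

values = np.arange(10, dtype=np.float64)
mask = values > 5

# scalar case: equivalent to values[mask] = 0.0, but avoids the
# fancy-indexing overhead (GH#37833)
np.putmask(values, mask, 0.0)

# caveat (GH#19266): with a same-length list-like value, np.putmask fills
# the True positions cyclically from the value, while
# values[mask] = value[mask] aligns positionally; that is why the
# object/EA blocks keep plain __setitem__
```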
https://api.github.com/repos/pandas-dev/pandas/pulls/37945
2020-11-18T20:41:56Z
2020-11-19T02:07:37Z
2020-11-19T02:07:37Z
2020-11-19T02:16:36Z
CLN: test_moments_expanding_consistency.py
diff --git a/pandas/tests/window/moments/test_moments_consistency_expanding.py b/pandas/tests/window/moments/test_moments_consistency_expanding.py index 25a897545ce58..17f76bf824a5d 100644 --- a/pandas/tests/window/moments/test_moments_consistency_expanding.py +++ b/pandas/tests/window/moments/test_moments_consistency_expanding.py @@ -1,19 +1,8 @@ -import warnings - import numpy as np import pytest from pandas import DataFrame, Index, MultiIndex, Series, isna, notna import pandas._testing as tm -from pandas.tests.window.common import ( - moments_consistency_cov_data, - moments_consistency_is_constant, - moments_consistency_mock_mean, - moments_consistency_series_data, - moments_consistency_std_data, - moments_consistency_var_data, - moments_consistency_var_debiasing_factors, -) def test_expanding_corr(series): @@ -171,143 +160,173 @@ def test_expanding_min_periods_apply(engine_and_raw): @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) -def test_expanding_apply_consistency( - consistency_data, base_functions, no_nan_functions, min_periods -): +@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum]) +def test_expanding_apply_consistency_sum_nans(consistency_data, min_periods, f): x, is_constant, no_nans = consistency_data - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning + if f is np.nansum and min_periods == 0: + pass + else: + expanding_f_result = x.expanding(min_periods=min_periods).sum() + expanding_apply_f_result = x.expanding(min_periods=min_periods).apply( + func=f, raw=True ) - # test consistency between expanding_xyz() and either (a) - # expanding_apply of Series.xyz(), or (b) expanding_apply of - # np.nanxyz() - functions = base_functions - - # GH 8269 - if no_nans: - functions = base_functions + no_nan_functions - for (f, require_min_periods, name) in functions: - expanding_f = getattr(x.expanding(min_periods=min_periods), name) - - if ( - require_min_periods - and (min_periods is not None) - and (min_periods < require_min_periods) - ): - continue - - if name == "count": - expanding_f_result = expanding_f() - expanding_apply_f_result = x.expanding(min_periods=0).apply( - func=f, raw=True - ) - else: - if name in ["cov", "corr"]: - expanding_f_result = expanding_f(pairwise=False) - else: - expanding_f_result = expanding_f() - expanding_apply_f_result = x.expanding(min_periods=min_periods).apply( - func=f, raw=True - ) - - # GH 9422 - if name in ["sum", "prod"]: - tm.assert_equal(expanding_f_result, expanding_apply_f_result) + tm.assert_equal(expanding_f_result, expanding_apply_f_result) @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) -def test_moments_consistency_var(consistency_data, min_periods): +@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum, np.sum]) +def test_expanding_apply_consistency_sum_no_nans(consistency_data, min_periods, f): + x, is_constant, no_nans = consistency_data - moments_consistency_var_data( - x=x, - is_constant=is_constant, - min_periods=min_periods, - count=lambda x: x.expanding(min_periods=min_periods).count(), - mean=lambda x: x.expanding(min_periods=min_periods).mean(), - var_unbiased=lambda x: x.expanding(min_periods=min_periods).var(), - var_biased=lambda x: x.expanding(min_periods=min_periods).var(ddof=0), - ) + + if no_nans: + if f is np.nansum and min_periods == 0: + pass + else: + expanding_f_result = x.expanding(min_periods=min_periods).sum() + expanding_apply_f_result = 
x.expanding(min_periods=min_periods).apply( + func=f, raw=True + ) + tm.assert_equal(expanding_f_result, expanding_apply_f_result) @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) -def test_expanding_consistency_std(consistency_data, min_periods): +@pytest.mark.parametrize("ddof", [0, 1]) +def test_moments_consistency_var(consistency_data, min_periods, ddof): x, is_constant, no_nans = consistency_data - moments_consistency_std_data( - x=x, - var_unbiased=lambda x: x.expanding(min_periods=min_periods).var(), - std_unbiased=lambda x: x.expanding(min_periods=min_periods).std(), - var_biased=lambda x: x.expanding(min_periods=min_periods).var(ddof=0), - std_biased=lambda x: x.expanding(min_periods=min_periods).std(ddof=0), - ) + + mean_x = x.expanding(min_periods=min_periods).mean() + var_x = x.expanding(min_periods=min_periods).var(ddof=ddof) + assert not (var_x < 0).any().any() + + if ddof == 0: + # check that biased var(x) == mean(x^2) - mean(x)^2 + mean_x2 = (x * x).expanding(min_periods=min_periods).mean() + tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x)) @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) -def test_expanding_consistency_cov(consistency_data, min_periods): +@pytest.mark.parametrize("ddof", [0, 1]) +def test_moments_consistency_var_constant(consistency_data, min_periods, ddof): x, is_constant, no_nans = consistency_data - moments_consistency_cov_data( - x=x, - var_unbiased=lambda x: x.expanding(min_periods=min_periods).var(), - cov_unbiased=lambda x, y: x.expanding(min_periods=min_periods).cov(y), - var_biased=lambda x: x.expanding(min_periods=min_periods).var(ddof=0), - cov_biased=lambda x, y: x.expanding(min_periods=min_periods).cov(y, ddof=0), - ) + + if is_constant: + count_x = x.expanding(min_periods=min_periods).count() + var_x = x.expanding(min_periods=min_periods).var(ddof=ddof) + + # check that variance of constant series is identically 0 + assert not (var_x > 0).any().any() + expected = x * np.nan + expected[count_x >= max(min_periods, 1)] = 0.0 + if ddof == 1: + expected[count_x < 2] = np.nan + tm.assert_equal(var_x, expected) + + +@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) +@pytest.mark.parametrize("ddof", [0, 1]) +def test_expanding_consistency_std(consistency_data, min_periods, ddof): + x, is_constant, no_nans = consistency_data + + var_x = x.expanding(min_periods=min_periods).var(ddof=ddof) + std_x = x.expanding(min_periods=min_periods).std(ddof=ddof) + assert not (var_x < 0).any().any() + assert not (std_x < 0).any().any() + + # check that var(x) == std(x)^2 + tm.assert_equal(var_x, std_x * std_x) + + +@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) +@pytest.mark.parametrize("ddof", [0, 1]) +def test_expanding_consistency_cov(consistency_data, min_periods, ddof): + x, is_constant, no_nans = consistency_data + var_x = x.expanding(min_periods=min_periods).var(ddof=ddof) + assert not (var_x < 0).any().any() + + cov_x_x = x.expanding(min_periods=min_periods).cov(x, ddof=ddof) + assert not (cov_x_x < 0).any().any() + + # check that var(x) == cov(x, x) + tm.assert_equal(var_x, cov_x_x) + + +@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) +@pytest.mark.parametrize("ddof", [0, 1]) +def test_expanding_consistency_series_cov_corr(consistency_data, min_periods, ddof): + x, is_constant, no_nans = consistency_data + + if isinstance(x, Series): + var_x_plus_y = (x + x).expanding(min_periods=min_periods).var(ddof=ddof) + var_x = x.expanding(min_periods=min_periods).var(ddof=ddof) + var_y = 
x.expanding(min_periods=min_periods).var(ddof=ddof) + cov_x_y = x.expanding(min_periods=min_periods).cov(x, ddof=ddof) + # check that cov(x, y) == (var(x+y) - var(x) - + # var(y)) / 2 + tm.assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y)) + + # check that corr(x, y) == cov(x, y) / (std(x) * + # std(y)) + corr_x_y = x.expanding(min_periods=min_periods).corr(x) + std_x = x.expanding(min_periods=min_periods).std(ddof=ddof) + std_y = x.expanding(min_periods=min_periods).std(ddof=ddof) + tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y)) + + if ddof == 0: + # check that biased cov(x, y) == mean(x*y) - + # mean(x)*mean(y) + mean_x = x.expanding(min_periods=min_periods).mean() + mean_y = x.expanding(min_periods=min_periods).mean() + mean_x_times_y = (x * x).expanding(min_periods=min_periods).mean() + tm.assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y)) @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) -def test_expanding_consistency_series(consistency_data, min_periods): +def test_expanding_consistency_mean(consistency_data, min_periods): x, is_constant, no_nans = consistency_data - moments_consistency_series_data( - x=x, - mean=lambda x: x.expanding(min_periods=min_periods).mean(), - corr=lambda x, y: x.expanding(min_periods=min_periods).corr(y), - var_unbiased=lambda x: x.expanding(min_periods=min_periods).var(), - std_unbiased=lambda x: x.expanding(min_periods=min_periods).std(), - cov_unbiased=lambda x, y: x.expanding(min_periods=min_periods).cov(y), - var_biased=lambda x: x.expanding(min_periods=min_periods).var(ddof=0), - std_biased=lambda x: x.expanding(min_periods=min_periods).std(ddof=0), - cov_biased=lambda x, y: x.expanding(min_periods=min_periods).cov(y, ddof=0), + + result = x.expanding(min_periods=min_periods).mean() + expected = ( + x.expanding(min_periods=min_periods).sum() + / x.expanding(min_periods=min_periods).count() ) + tm.assert_equal(result, expected.astype("float64")) -@pytest.mark.slow @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) -def test_expanding_consistency(consistency_data, min_periods): +def test_expanding_consistency_constant(consistency_data, min_periods): x, is_constant, no_nans = consistency_data - # suppress warnings about empty slices, as we are deliberately testing - # with empty/0-length Series/DataFrames - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning - ) - # test consistency between different expanding_* moments - moments_consistency_mock_mean( - x=x, - mean=lambda x: x.expanding(min_periods=min_periods).mean(), - mock_mean=lambda x: x.expanding(min_periods=min_periods).sum() - / x.expanding().count(), - ) + if is_constant: + count_x = x.expanding().count() + mean_x = x.expanding(min_periods=min_periods).mean() + # check that correlation of a series with itself is either 1 or NaN + corr_x_x = x.expanding(min_periods=min_periods).corr(x) - moments_consistency_is_constant( - x=x, - is_constant=is_constant, - min_periods=min_periods, - count=lambda x: x.expanding().count(), - mean=lambda x: x.expanding(min_periods=min_periods).mean(), - corr=lambda x, y: x.expanding(min_periods=min_periods).corr(y), - ) + exp = x.max() if isinstance(x, Series) else x.max().max() - moments_consistency_var_debiasing_factors( - x=x, - var_unbiased=lambda x: x.expanding(min_periods=min_periods).var(), - var_biased=lambda x: x.expanding(min_periods=min_periods).var(ddof=0), - var_debiasing_factors=lambda x: ( - x.expanding().count() - / 
(x.expanding().count() - 1.0).replace(0.0, np.nan) - ), - ) + # check mean of constant series + expected = x * np.nan + expected[count_x >= max(min_periods, 1)] = exp + tm.assert_equal(mean_x, expected) + + # check correlation of constant series with itself is NaN + expected[:] = np.nan + tm.assert_equal(corr_x_x, expected) + + +@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) +def test_expanding_consistency_var_debiasing_factors(consistency_data, min_periods): + x, is_constant, no_nans = consistency_data + + # check variance debiasing factors + var_unbiased_x = x.expanding(min_periods=min_periods).var() + var_biased_x = x.expanding(min_periods=min_periods).var(ddof=0) + var_debiasing_factors_x = x.expanding().count() / ( + x.expanding().count() - 1.0 + ).replace(0.0, np.nan) + tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x) @pytest.mark.parametrize(
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`

Breaking up the predefined `moments_consistency_*` helper functions into more narrowly scoped tests that use more pytest idioms.
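For example, the old mock-mean helper becomes a small standalone check; a sketch of the identity it asserts:

```python
import numpy as np
import pandas as pd
import pandas._testing as tm

x = pd.Series([1.0, np.nan, 2.0, 3.0])
exp = x.expanding(min_periods=1)

# check that mean(x) == sum(x) / count(x)
tm.assert_series_equal(exp.mean(), (exp.sum() / exp.count()).astype("float64"))
```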
https://api.github.com/repos/pandas-dev/pandas/pulls/37944
2020-11-18T18:39:04Z
2020-11-18T20:07:40Z
2020-11-18T20:07:39Z
2020-11-18T20:07:45Z
BUG: fix sharey overwrite on area plots
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index e00b177f2a2fc..5abad99637ebe 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -664,6 +664,7 @@ Plotting - Bug in :meth:`Series.plot` and :meth:`DataFrame.plot` was throwing :exc:`ValueError` with a :class:`Series` or :class:`DataFrame` indexed by a :class:`TimedeltaIndex` with a fixed frequency when x-axis lower limit was greater than upper limit (:issue:`37454`) - Bug in :meth:`DataFrameGroupBy.boxplot` when ``subplots=False``, a KeyError would raise (:issue:`16748`) +- Bug in :meth:`DataFrame.plot` and :meth:`Series.plot` was overwriting matplotlib's shared y axes behaviour when no sharey parameter was passed (:issue:`37942`) Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 2501d84de4459..97fa3f11e9dfb 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -1338,7 +1338,9 @@ def _plot( def _post_plot_logic(self, ax: "Axes", data): LinePlot._post_plot_logic(self, ax, data) - if self.ylim is None: + is_shared_y = len(list(ax.get_shared_y_axes())) > 0 + # do not override the default axis behaviour in case of shared y axes + if self.ylim is None and not is_shared_y: if (data >= 0).all().all(): ax.set_ylim(0, None) elif (data <= 0).all().all(): diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py index 3c43e0b693a1b..56ac7a477adbb 100644 --- a/pandas/tests/plotting/frame/test_frame.py +++ b/pandas/tests/plotting/frame/test_frame.py @@ -477,6 +477,17 @@ def test_area_lim(self): ymin, ymax = ax.get_ylim() assert ymax == 0 + def test_area_sharey_dont_overwrite(self): + # GH37942 + df = DataFrame(np.random.rand(4, 2), columns=["x", "y"]) + fig, (ax1, ax2) = self.plt.subplots(1, 2, sharey=True) + + df.plot(ax=ax1, kind="area") + df.plot(ax=ax2, kind="area") + + assert ax1._shared_y_axes.joined(ax1, ax2) + assert ax2._shared_y_axes.joined(ax1, ax2) + @pytest.mark.slow def test_bar_linewidth(self): df = DataFrame(np.random.randn(5, 5)) diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index f3289d0573de2..b8dd2ada87506 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -140,6 +140,16 @@ def test_ts_area_lim(self): assert xmax >= line[-1] self._check_ticks_props(ax, xrot=0) + def test_area_sharey_dont_overwrite(self): + # GH37942 + fig, (ax1, ax2) = self.plt.subplots(1, 2, sharey=True) + + abs(self.ts).plot(ax=ax1, kind="area") + abs(self.ts).plot(ax=ax2, kind="area") + + assert ax1._shared_y_axes.joined(ax1, ax2) + assert ax2._shared_y_axes.joined(ax1, ax2) + def test_label(self): s = Series([1, 2]) _, ax = self.plt.subplots()
- [x] closes #37942
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
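A sketch of the fixed behaviour, mirroring the new tests but using matplotlib's public `get_shared_y_axes` instead of the private attribute:

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

df = pd.DataFrame(np.random.rand(4, 2), columns=["x", "y"])
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)

df.plot(ax=ax1, kind="area")
df.plot(ax=ax2, kind="area")

# with the fix, the area plots no longer clobber the sharey link
assert ax1.get_shared_y_axes().joined(ax1, ax2)
```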
https://api.github.com/repos/pandas-dev/pandas/pulls/37943
2020-11-18T18:00:52Z
2020-11-20T09:38:15Z
2020-11-20T09:38:14Z
2020-12-02T16:32:06Z
CI: Fix ci flake error
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index d52a8ae935688..0b0f985697da9 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -6,7 +6,7 @@ from pandas._libs import index as libindex from pandas._libs.tslibs import BaseOffset, Period, Resolution, Tick from pandas._libs.tslibs.parsing import DateParseError, parse_time_string -from pandas._typing import DtypeObj, Label +from pandas._typing import DtypeObj from pandas.errors import InvalidIndexError from pandas.util._decorators import Appender, cache_readonly, doc
- [ ] xref #36305

Came across this while working on #36305.
https://api.github.com/repos/pandas-dev/pandas/pulls/37938
2020-11-18T15:57:35Z
2020-11-18T16:47:46Z
2020-11-18T16:47:46Z
2020-11-18T16:50:36Z
BUG: loc.setitem with expansion expanding rows
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 08edc7531bcd6..e875153626b5a 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -622,6 +622,7 @@ Indexing - Bug in :meth:`Series.loc` and :meth:`DataFrame.loc` raises when the index was of ``object`` dtype and the given numeric label was in the index (:issue:`26491`) - Bug in :meth:`DataFrame.loc` returned requested key plus missing values when ``loc`` was applied to single level from a :class:`MultiIndex` (:issue:`27104`) - Bug in indexing on a :class:`Series` or :class:`DataFrame` with a :class:`CategoricalIndex` using a listlike indexer containing NA values (:issue:`37722`) +- Bug in :meth:`DataFrame.loc.__setitem__` expanding an empty :class:`DataFrame` with mixed dtypes (:issue:`37932`) - Bug in :meth:`DataFrame.xs` ignored ``droplevel=False`` for columns (:issue:`19056`) - Bug in :meth:`DataFrame.reindex` raising ``IndexingError`` wrongly for empty DataFrame with ``tolerance`` not None or ``method="nearest"`` (:issue:`27315`) - Bug in indexing on a :class:`Series` or :class:`DataFrame` with a :class:`CategoricalIndex` using listlike indexer that contains elements that are in the index's ``categories`` but not in the index itself failing to raise ``KeyError`` (:issue:`37901`) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index bdd79e37ba386..f66fd19f30d84 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1687,6 +1687,14 @@ def _setitem_with_indexer_split_path(self, indexer, value, name: str): for loc, v in zip(ilocs, value): self._setitem_single_column(loc, v, pi) + elif len(ilocs) == 1 and com.is_null_slice(pi) and len(self.obj) == 0: + # This is a setitem-with-expansion, see + # test_loc_setitem_empty_append_expands_rows_mixed_dtype + # e.g. df = DataFrame(columns=["x", "y"]) + # df["x"] = df["x"].astype(np.int64) + # df.loc[:, "x"] = [1, 2, 3] + self._setitem_single_column(ilocs[0], value, pi) + else: raise ValueError( "Must have equal len keys and value " diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index f8ad16be607a5..cf6c2878acd9a 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -952,7 +952,7 @@ def test_loc_uint64(self): result = s.loc[[np.iinfo("uint64").max - 1, np.iinfo("uint64").max]] tm.assert_series_equal(result, s) - def test_loc_setitem_empty_append(self): + def test_loc_setitem_empty_append_expands_rows(self): # GH6173, various appends to an empty dataframe data = [1, 2, 3] @@ -963,6 +963,18 @@ def test_loc_setitem_empty_append(self): df.loc[:, "x"] = data tm.assert_frame_equal(df, expected) + def test_loc_setitem_empty_append_expands_rows_mixed_dtype(self): + # GH#37932 same as test_loc_setitem_empty_append_expands_rows + # but with mixed dtype so we go through take_split_path + data = [1, 2, 3] + expected = DataFrame({"x": data, "y": [None] * len(data)}) + + df = DataFrame(columns=["x", "y"]) + df["x"] = df["x"].astype(np.int64) + df.loc[:, "x"] = data + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_empty_append_single_value(self): # only appends one value expected = DataFrame({"x": [1.0], "y": [np.nan]}) df = DataFrame(columns=["x", "y"], dtype=float)
- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

Discovered on a branch where DataFrame setitem always goes through the split path.
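The fixed case, mirroring the new test (requires a build that includes this change):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(columns=["x", "y"])
df["x"] = df["x"].astype(np.int64)  # mixed dtypes, so setitem takes the split path

# previously raised "Must have equal len keys and value when setting
# with an iterable"; now expands the rows like the single-dtype case
df.loc[:, "x"] = [1, 2, 3]
```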
https://api.github.com/repos/pandas-dev/pandas/pulls/37932
2020-11-18T04:14:47Z
2020-11-27T19:49:44Z
2020-11-27T19:49:44Z
2020-11-27T19:52:26Z
BUG: loc.setitem corner case
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index f6d14a1c1503c..7bfc62899870f 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1649,7 +1649,8 @@ def _setitem_with_indexer_split_path(self, indexer, value, name: str): value = self._align_series(indexer, value) # Ensure we have something we can iterate over - ilocs = self._ensure_iterable_column_indexer(indexer[1]) + info_axis = indexer[1] + ilocs = self._ensure_iterable_column_indexer(info_axis) pi = indexer[0] lplane_indexer = length_of_indexer(pi, self.obj.index) @@ -1673,6 +1674,12 @@ def _setitem_with_indexer_split_path(self, indexer, value, name: str): # We are trying to set N values into M entries of a single # column, which is invalid for N != M # Exclude zero-len for e.g. boolean masking that is all-false + + if len(value) == 1 and not is_integer(info_axis): + # This is a case like df.iloc[:3, [1]] = [0] + # where we treat as df.iloc[:3, 1] = 0 + return self._setitem_with_indexer((pi, info_axis[0]), value[0]) + raise ValueError( "Must have equal len keys and value " "when setting with an iterable" diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py index f95eac57e9140..e5d114d5a9b18 100644 --- a/pandas/tests/indexing/multiindex/test_setitem.py +++ b/pandas/tests/indexing/multiindex/test_setitem.py @@ -210,6 +210,12 @@ def test_multiindex_assignment(self): with pytest.raises(ValueError, match=msg): df.loc[4, "c"] = [0] + # But with a length-1 listlike column indexer this behaves like + # `df.loc[4, "c"] = 0 + df.loc[4, ["c"]] = [0] + assert (df.loc[4, "c"] == 0).all() + + def test_groupby_example(self): # groupby example NUM_ROWS = 100 NUM_COLS = 10
- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry

cc @phofl if you've got any thoughts on how to handle this case
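The case in question, mirroring the updated test; the rows use a MultiIndex so `df.loc[4, "c"]` selects several entries (values arbitrary, requires a build with this change):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(
    np.random.randn(4, 3),
    columns=["a", "b", "c"],
    index=[[4, 4, 8, 8], [8, 10, 12, 14]],
)

# a length-1 list value with a length-1 column indexer is now treated
# like the scalar assignment df.loc[4, "c"] = 0
df.loc[4, ["c"]] = [0]
assert (df.loc[4, "c"] == 0).all()
```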
https://api.github.com/repos/pandas-dev/pandas/pulls/37931
2020-11-18T04:03:03Z
2020-11-26T22:26:12Z
2020-11-26T22:26:12Z
2020-11-26T22:26:54Z
CLN: tests/window/*
diff --git a/pandas/tests/window/conftest.py b/pandas/tests/window/conftest.py index a803ce716eb05..e1d7635b0a686 100644 --- a/pandas/tests/window/conftest.py +++ b/pandas/tests/window/conftest.py @@ -35,7 +35,18 @@ def win_types_special(request): @pytest.fixture( - params=["sum", "mean", "median", "max", "min", "var", "std", "kurt", "skew"] + params=[ + "sum", + "mean", + "median", + "max", + "min", + "var", + "std", + "kurt", + "skew", + "count", + ] ) def arithmetic_win_operators(request): return request.param diff --git a/pandas/tests/window/moments/test_moments_consistency_expanding.py b/pandas/tests/window/moments/test_moments_consistency_expanding.py index eb348fda5782b..25a897545ce58 100644 --- a/pandas/tests/window/moments/test_moments_consistency_expanding.py +++ b/pandas/tests/window/moments/test_moments_consistency_expanding.py @@ -16,49 +16,6 @@ ) -def _check_expanding( - func, static_comp, preserve_nan=True, series=None, frame=None, nan_locs=None -): - - series_result = func(series) - assert isinstance(series_result, Series) - frame_result = func(frame) - assert isinstance(frame_result, DataFrame) - - result = func(series) - tm.assert_almost_equal(result[10], static_comp(series[:11])) - - if preserve_nan: - assert result.iloc[nan_locs].isna().all() - - -def _check_expanding_has_min_periods(func, static_comp, has_min_periods): - ser = Series(np.random.randn(50)) - - if has_min_periods: - result = func(ser, min_periods=30) - assert result[:29].isna().all() - tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50])) - - # min_periods is working correctly - result = func(ser, min_periods=15) - assert isna(result.iloc[13]) - assert notna(result.iloc[14]) - - ser2 = Series(np.random.randn(20)) - result = func(ser2, min_periods=5) - assert isna(result[3]) - assert notna(result[4]) - - # min_periods=0 - result0 = func(ser, min_periods=0) - result1 = func(ser, min_periods=1) - tm.assert_almost_equal(result0, result1) - else: - result = func(ser) - tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50])) - - def test_expanding_corr(series): A = series.dropna() B = (A + np.random.randn(len(A)))[:-5] @@ -111,50 +68,106 @@ def test_expanding_corr_pairwise(frame): tm.assert_frame_equal(result, rolling_result) -@pytest.mark.parametrize("has_min_periods", [True, False]) @pytest.mark.parametrize( "func,static_comp", [("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)], ids=["sum", "mean", "max", "min"], ) -def test_expanding_func(func, static_comp, has_min_periods, series, frame, nan_locs): - def expanding_func(x, min_periods=1, axis=0): - exp = x.expanding(min_periods=min_periods, axis=axis) - return getattr(exp, func)() - - _check_expanding( - expanding_func, - static_comp, - preserve_nan=False, - series=series, - frame=frame, - nan_locs=nan_locs, +def test_expanding_func(func, static_comp, frame_or_series): + data = frame_or_series(np.array(list(range(10)) + [np.nan] * 10)) + result = getattr(data.expanding(min_periods=1, axis=0), func)() + assert isinstance(result, frame_or_series) + + if frame_or_series is Series: + tm.assert_almost_equal(result[10], static_comp(data[:11])) + else: + tm.assert_series_equal( + result.iloc[10], static_comp(data[:11]), check_names=False + ) + + +@pytest.mark.parametrize( + "func,static_comp", + [("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)], + ids=["sum", "mean", "max", "min"], +) +def test_expanding_min_periods(func, static_comp): + ser = Series(np.random.randn(50)) + + result = 
getattr(ser.expanding(min_periods=30, axis=0), func)() + assert result[:29].isna().all() + tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50])) + + # min_periods is working correctly + result = getattr(ser.expanding(min_periods=15, axis=0), func)() + assert isna(result.iloc[13]) + assert notna(result.iloc[14]) + + ser2 = Series(np.random.randn(20)) + result = getattr(ser2.expanding(min_periods=5, axis=0), func)() + assert isna(result[3]) + assert notna(result[4]) + + # min_periods=0 + result0 = getattr(ser.expanding(min_periods=0, axis=0), func)() + result1 = getattr(ser.expanding(min_periods=1, axis=0), func)() + tm.assert_almost_equal(result0, result1) + + result = getattr(ser.expanding(min_periods=1, axis=0), func)() + tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50])) + + +def test_expanding_apply(engine_and_raw, frame_or_series): + engine, raw = engine_and_raw + data = frame_or_series(np.array(list(range(10)) + [np.nan] * 10)) + result = data.expanding(min_periods=1).apply( + lambda x: x.mean(), raw=raw, engine=engine ) - _check_expanding_has_min_periods(expanding_func, static_comp, has_min_periods) + assert isinstance(result, frame_or_series) + if frame_or_series is Series: + tm.assert_almost_equal(result[9], np.mean(data[:11])) + else: + tm.assert_series_equal(result.iloc[9], np.mean(data[:11]), check_names=False) -@pytest.mark.parametrize("has_min_periods", [True, False]) -def test_expanding_apply(engine_and_raw, has_min_periods, series, frame, nan_locs): +def test_expanding_min_periods_apply(engine_and_raw): engine, raw = engine_and_raw + ser = Series(np.random.randn(50)) + + result = ser.expanding(min_periods=30).apply( + lambda x: x.mean(), raw=raw, engine=engine + ) + assert result[:29].isna().all() + tm.assert_almost_equal(result.iloc[-1], np.mean(ser[:50])) + + # min_periods is working correctly + result = ser.expanding(min_periods=15).apply( + lambda x: x.mean(), raw=raw, engine=engine + ) + assert isna(result.iloc[13]) + assert notna(result.iloc[14]) + + ser2 = Series(np.random.randn(20)) + result = ser2.expanding(min_periods=5).apply( + lambda x: x.mean(), raw=raw, engine=engine + ) + assert isna(result[3]) + assert notna(result[4]) + + # min_periods=0 + result0 = ser.expanding(min_periods=0).apply( + lambda x: x.mean(), raw=raw, engine=engine + ) + result1 = ser.expanding(min_periods=1).apply( + lambda x: x.mean(), raw=raw, engine=engine + ) + tm.assert_almost_equal(result0, result1) - def expanding_mean(x, min_periods=1): - - exp = x.expanding(min_periods=min_periods) - result = exp.apply(lambda x: x.mean(), raw=raw, engine=engine) - return result - - # TODO(jreback), needed to add preserve_nan=False - # here to make this pass - _check_expanding( - expanding_mean, - np.mean, - preserve_nan=False, - series=series, - frame=frame, - nan_locs=nan_locs, + result = ser.expanding(min_periods=1).apply( + lambda x: x.mean(), raw=raw, engine=engine ) - _check_expanding_has_min_periods(expanding_mean, np.mean, has_min_periods) + tm.assert_almost_equal(result.iloc[-1], np.mean(ser[:50])) @pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4]) diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index 6cad93f2d77ba..75e1a771b70ea 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -122,7 +122,6 @@ def test_numpy_compat(method): getattr(r, method)(dtype=np.float64) -@pytest.mark.parametrize("closed", ["left", "right", "both", "neither"]) def test_closed_fixed(closed, 
arithmetic_win_operators): # GH 34315 func_name = arithmetic_win_operators diff --git a/pandas/tests/window/test_timeseries_window.py b/pandas/tests/window/test_timeseries_window.py index d9fcb538c97c1..0782ef2f4ce7b 100644 --- a/pandas/tests/window/test_timeseries_window.py +++ b/pandas/tests/window/test_timeseries_window.py @@ -621,23 +621,8 @@ def test_all(self, f): expected = er.quantile(0.5) tm.assert_frame_equal(result, expected) - @pytest.mark.parametrize( - "f", - [ - "sum", - "mean", - "count", - "median", - "std", - "var", - "kurt", - "skew", - "min", - "max", - ], - ) - def test_all2(self, f): - + def test_all2(self, arithmetic_win_operators): + f = arithmetic_win_operators # more sophisticated comparison of integer vs. # time-based windowing df = DataFrame( diff --git a/pandas/tests/window/test_window.py b/pandas/tests/window/test_win_type.py similarity index 100% rename from pandas/tests/window/test_window.py rename to pandas/tests/window/test_win_type.py
* Renamed `test_window.py` to `test_win_type.py` * Added `'count'` to `arithmetic_win_operators` * Reused fixtures * Used more pytest idioms in `test_moments_consistency_expanding.py`
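As a quick editorial sketch of the `min_periods` semantics the reworked expanding tests assert (illustrative only, not part of the patch): with `min_periods=n`, expanding aggregations emit NaN until `n` observations have accumulated.

```python
import numpy as np
import pandas as pd

ser = pd.Series(np.arange(5, dtype="float64"))
# fewer than 3 observations -> NaN; from index 2 on, the running mean
ser.expanding(min_periods=3).mean()
# 0    NaN
# 1    NaN
# 2    1.0
# 3    1.5
# 4    2.0
# dtype: float64
```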
https://api.github.com/repos/pandas-dev/pandas/pulls/37926
2020-11-18T01:26:10Z
2020-11-18T18:16:19Z
2020-11-18T18:16:19Z
2020-11-18T18:33:29Z
BUG: idxmax/min (and argmax/min) for Series with underlying ExtensionArray
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 607a14c696578..eb9e31be0772e 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -302,8 +302,9 @@ Sparse ExtensionArray ^^^^^^^^^^^^^^ + - Bug in :meth:`DataFrame.where` when ``other`` is a :class:`Series` with ExtensionArray dtype (:issue:`38729`) -- +- Fixed bug where :meth:`Series.idxmax`, :meth:`Series.idxmin` and ``argmax/min`` fail when the underlying data is :class:`ExtensionArray` (:issue:`32749`, :issue:`33719`, :issue:`36566`) - Other diff --git a/pandas/core/base.py b/pandas/core/base.py index 54dec90c08aa2..afc22a8446dce 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -715,9 +715,17 @@ def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: the minimum cereal calories is the first element, since series is zero-indexed. """ + delegate = self._values nv.validate_minmax_axis(axis) - nv.validate_argmax_with_skipna(skipna, args, kwargs) - return nanops.nanargmax(self._values, skipna=skipna) + skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) + + if isinstance(delegate, ExtensionArray): + if not skipna and delegate.isna().any(): + return -1 + else: + return delegate.argmax() + else: + return nanops.nanargmax(delegate, skipna=skipna) def min(self, axis=None, skipna: bool = True, *args, **kwargs): """ @@ -765,9 +773,17 @@ def min(self, axis=None, skipna: bool = True, *args, **kwargs): @doc(argmax, op="min", oppose="max", value="smallest") def argmin(self, axis=None, skipna=True, *args, **kwargs) -> int: + delegate = self._values nv.validate_minmax_axis(axis) - nv.validate_argmax_with_skipna(skipna, args, kwargs) - return nanops.nanargmin(self._values, skipna=skipna) + skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs) + + if isinstance(delegate, ExtensionArray): + if not skipna and delegate.isna().any(): + return -1 + else: + return delegate.argmin() + else: + return nanops.nanargmin(delegate, skipna=skipna) def tolist(self): """ diff --git a/pandas/core/series.py b/pandas/core/series.py index cb753e887b0f8..7f2039c998f53 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2076,8 +2076,7 @@ def idxmin(self, axis=0, skipna=True, *args, **kwargs): >>> s.idxmin(skipna=False) nan """ - skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs) - i = nanops.nanargmin(self._values, skipna=skipna) + i = self.argmin(None, skipna=skipna) if i == -1: return np.nan return self.index[i] @@ -2147,8 +2146,7 @@ def idxmax(self, axis=0, skipna=True, *args, **kwargs): >>> s.idxmax(skipna=False) nan """ - skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) - i = nanops.nanargmax(self._values, skipna=skipna) + i = self.argmax(None, skipna=skipna) if i == -1: return np.nan return self.index[i] diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py index 1cc03d4f4f2bd..472e783c977f0 100644 --- a/pandas/tests/extension/base/methods.py +++ b/pandas/tests/extension/base/methods.py @@ -107,6 +107,27 @@ def test_argmin_argmax_all_na(self, method, data, na_value): with pytest.raises(ValueError, match=err_msg): getattr(data_na, method)() + @pytest.mark.parametrize( + "op_name, skipna, expected", + [ + ("idxmax", True, 0), + ("idxmin", True, 2), + ("argmax", True, 0), + ("argmin", True, 2), + ("idxmax", False, np.nan), + ("idxmin", False, np.nan), + ("argmax", False, -1), + ("argmin", False, -1), + ], + ) + def test_argreduce_series( + self, data_missing_for_sorting, op_name, 
skipna, expected + ): + # data_missing_for_sorting -> [B, NA, A] with A < B and NA missing. + ser = pd.Series(data_missing_for_sorting) + result = getattr(ser, op_name)(skipna=skipna) + tm.assert_almost_equal(result, expected) + @pytest.mark.parametrize( "na_position, expected", [
- [x] closes #32749, closes #33719, closes #36566 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Currently `pd.Series([1, 2, 3]).astype('Int8').idxmax()` raises an error. This pull request proposes a fix. This is my first time using GitHub and opening a pull request, so apologies if anything is off.
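A minimal sketch of the behavior this fix gives, based on the diff above (illustrative, not part of the patch):

```python
import pandas as pd

ser = pd.Series([1, None, 3], dtype="Int8")
ser.idxmax()              # 2 -- previously raised for extension dtypes
ser.idxmax(skipna=False)  # nan, because a missing value is present
ser.argmax(skipna=False)  # -1, matching the nanops convention
```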
https://api.github.com/repos/pandas-dev/pandas/pulls/37924
2020-11-17T23:44:09Z
2021-01-01T21:38:40Z
2021-01-01T21:38:39Z
2021-01-02T00:26:01Z
REF: de-duplicate CategoricalIndex.get_indexer
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index d11d4952ff1c7..e561e994778e9 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -521,40 +521,33 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): if self.is_unique and self.equals(target): return np.arange(len(self), dtype="intp") - # Note: we use engine.get_indexer_non_unique below because, even if - # `target` is unique, any non-category entries in it will be encoded - # as -1 by _get_codes_for_get_indexer, so `codes` may not be unique. - codes = self._get_codes_for_get_indexer(target._values) - indexer, _ = self._engine.get_indexer_non_unique(codes) - return ensure_platform_int(indexer) + return self._get_indexer_non_unique(target._values)[0] @Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs) def get_indexer_non_unique(self, target): target = ibase.ensure_index(target) + return self._get_indexer_non_unique(target._values) - codes = self._get_codes_for_get_indexer(target._values) - indexer, missing = self._engine.get_indexer_non_unique(codes) - return ensure_platform_int(indexer), missing - - def _get_codes_for_get_indexer(self, target: ArrayLike) -> np.ndarray: + def _get_indexer_non_unique(self, values: ArrayLike): """ - Extract integer codes we can use for comparison. - - Notes - ----- - If a value in target is not present, it gets coded as -1. + get_indexer_non_unique but after unwrapping the target Index object. """ + # Note: we use engine.get_indexer_non_unique for get_indexer in addition + # to get_indexer_non_unique because, even if `target` is unique, any + # non-category entries in it will be encoded as -1 so `codes` may + # not be unique. - if isinstance(target, Categorical): + if isinstance(values, Categorical): # Indexing on codes is more efficient if categories are the same, # so we can apply some optimizations based on the degree of # dtype-matching. - cat = self._data._encode_with_my_categories(target) + cat = self._data._encode_with_my_categories(values) codes = cat._codes else: - codes = self.categories.get_indexer(target) + codes = self.categories.get_indexer(values) - return codes + indexer, missing = self._engine.get_indexer_non_unique(codes) + return ensure_platform_int(indexer), missing @doc(Index._convert_list_indexer) def _convert_list_indexer(self, keyarr): diff --git a/pandas/tests/indexing/interval/test_interval.py b/pandas/tests/indexing/interval/test_interval.py index df59d09edd3ef..0fa6abb27cb61 100644 --- a/pandas/tests/indexing/interval/test_interval.py +++ b/pandas/tests/indexing/interval/test_interval.py @@ -84,7 +84,7 @@ def test_large_series(self): tm.assert_series_equal(result1, result3) def test_loc_getitem_frame(self): - + # CategoricalIndex with IntervalIndex categories df = DataFrame({"A": range(10)}) s = pd.cut(df.A, 5) df["B"] = s
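Editorial aside: the note about non-unique codes is easy to see in a small sketch (not part of the patch). Every entry missing from the categories encodes to -1, so the codes handed to the engine can repeat even when the target itself is unique:

```python
import pandas as pd

ci = pd.CategoricalIndex(["a", "b", "c"])
# "x" and "y" are not categories, so both encode to -1
ci.get_indexer(["a", "x", "y"])
# array([ 0, -1, -1])
```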
https://api.github.com/repos/pandas-dev/pandas/pulls/37923
2020-11-17T22:47:09Z
2020-11-18T01:10:05Z
2020-11-18T01:10:05Z
2020-11-18T02:53:10Z
CLN: NDFrame._where, comment typos
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 4e6aba1961b64..73f1e7127dca4 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -9020,7 +9020,6 @@ def _where( cond = -cond if inplace else cond # try to align with other - try_quick = True if isinstance(other, NDFrame): # align with me @@ -9059,12 +9058,11 @@ def _where( # match True cond to other elif len(cond[icond]) == len(other): - # try to not change dtype at first (if try_quick) - if try_quick: - new_other = np.asarray(self) - new_other = new_other.copy() - new_other[icond] = other - other = new_other + # try to not change dtype at first + new_other = np.asarray(self) + new_other = new_other.copy() + new_other[icond] = other + other = new_other else: raise ValueError( diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 967e218078a28..7f071c0d7eed6 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1447,6 +1447,7 @@ def where( if values.ndim - 1 == other.ndim and axis == 1: other = other.reshape(tuple(other.shape + (1,))) elif transpose and values.ndim == self.ndim - 1: + # TODO(EA2D): not necessary with 2D EAs cond = cond.T if not hasattr(cond, "shape"): @@ -2413,9 +2414,8 @@ def _can_hold_element(self, element: Any) -> bool: return is_valid_nat_for_dtype(element, self.dtype) def fillna(self, value, **kwargs): - - # allow filling with integers to be - # interpreted as nanoseconds + # TODO(EA2D): if we operated on array_values, TDA.fillna would handle + # raising here. if is_integer(value): # Deprecation GH#24694, GH#19233 raise TypeError( diff --git a/pandas/core/series.py b/pandas/core/series.py index 800da18142825..42a87b003f634 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1015,7 +1015,7 @@ def __setitem__(self, key, value): # positional setter values[key] = value else: - # GH#12862 adding an new key to the Series + # GH#12862 adding a new key to the Series self.loc[key] = value except TypeError as err: diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index fa8f85178ba9f..6aad2cadf78ba 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -757,7 +757,7 @@ def test_align_date_objects_with_datetimeindex(self): @pytest.mark.parametrize("box", [list, tuple, np.array, pd.Index, pd.Series, pd.array]) @pytest.mark.parametrize("flex", [True, False]) def test_series_ops_name_retention(flex, box, names, all_binary_operators): - # GH#33930 consistent name renteiton + # GH#33930 consistent name retention op = all_binary_operators if op is ops.rfloordiv and box in [list, tuple]:
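For context, the `_where` branch cleaned up above sits behind the public `where`/`mask` replacement logic; a minimal usage sketch (not part of the patch):

```python
import pandas as pd

ser = pd.Series([1, 2, 3, 4])
ser.where(ser > 2, 0)  # values kept where the condition holds, 0 elsewhere
# 0    0
# 1    0
# 2    3
# 3    4
# dtype: int64
```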
https://api.github.com/repos/pandas-dev/pandas/pulls/37922
2020-11-17T22:38:13Z
2020-11-18T00:56:23Z
2020-11-18T00:56:23Z
2020-11-18T02:52:16Z
PERF: Introducing HashTables for datatypes with 8,16 and 32 bits
diff --git a/pandas/_libs/hashtable.pxd b/pandas/_libs/hashtable.pxd index 75c273b35ee7d..7b630c264753f 100644 --- a/pandas/_libs/hashtable.pxd +++ b/pandas/_libs/hashtable.pxd @@ -1,13 +1,27 @@ from numpy cimport intp_t, ndarray from pandas._libs.khash cimport ( + float32_t, float64_t, + int8_t, + int16_t, + int32_t, int64_t, + kh_float32_t, kh_float64_t, + kh_int8_t, + kh_int16_t, + kh_int32_t, kh_int64_t, kh_pymap_t, kh_str_t, + kh_uint8_t, + kh_uint16_t, + kh_uint32_t, kh_uint64_t, + uint8_t, + uint16_t, + uint32_t, uint64_t, ) @@ -28,12 +42,54 @@ cdef class Int64HashTable(HashTable): cpdef get_item(self, int64_t val) cpdef set_item(self, int64_t key, Py_ssize_t val) +cdef class UInt32HashTable(HashTable): + cdef kh_uint32_t *table + + cpdef get_item(self, uint32_t val) + cpdef set_item(self, uint32_t key, Py_ssize_t val) + +cdef class Int32HashTable(HashTable): + cdef kh_int32_t *table + + cpdef get_item(self, int32_t val) + cpdef set_item(self, int32_t key, Py_ssize_t val) + +cdef class UInt16HashTable(HashTable): + cdef kh_uint16_t *table + + cpdef get_item(self, uint16_t val) + cpdef set_item(self, uint16_t key, Py_ssize_t val) + +cdef class Int16HashTable(HashTable): + cdef kh_int16_t *table + + cpdef get_item(self, int16_t val) + cpdef set_item(self, int16_t key, Py_ssize_t val) + +cdef class UInt8HashTable(HashTable): + cdef kh_uint8_t *table + + cpdef get_item(self, uint8_t val) + cpdef set_item(self, uint8_t key, Py_ssize_t val) + +cdef class Int8HashTable(HashTable): + cdef kh_int8_t *table + + cpdef get_item(self, int8_t val) + cpdef set_item(self, int8_t key, Py_ssize_t val) + cdef class Float64HashTable(HashTable): cdef kh_float64_t *table cpdef get_item(self, float64_t val) cpdef set_item(self, float64_t key, Py_ssize_t val) +cdef class Float32HashTable(HashTable): + cdef kh_float32_t *table + + cpdef get_item(self, float32_t val) + cpdef set_item(self, float32_t key, Py_ssize_t val) + cdef class PyObjectHashTable(HashTable): cdef kh_pymap_t *table diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx index 5a0cddb0af197..cc080a87cfb5b 100644 --- a/pandas/_libs/hashtable.pyx +++ b/pandas/_libs/hashtable.pyx @@ -13,45 +13,7 @@ cnp.import_array() from pandas._libs cimport util -from pandas._libs.khash cimport ( - kh_destroy_float64, - kh_destroy_int64, - kh_destroy_pymap, - kh_destroy_str, - kh_destroy_uint64, - kh_exist_float64, - kh_exist_int64, - kh_exist_pymap, - kh_exist_str, - kh_exist_uint64, - kh_float64_t, - kh_get_float64, - kh_get_int64, - kh_get_pymap, - kh_get_str, - kh_get_strbox, - kh_get_uint64, - kh_init_float64, - kh_init_int64, - kh_init_pymap, - kh_init_str, - kh_init_strbox, - kh_init_uint64, - kh_int64_t, - kh_put_float64, - kh_put_int64, - kh_put_pymap, - kh_put_str, - kh_put_strbox, - kh_put_uint64, - kh_resize_float64, - kh_resize_int64, - kh_resize_pymap, - kh_resize_str, - kh_resize_uint64, - kh_str_t, - khiter_t, -) +from pandas._libs.khash cimport kh_str_t, khiter_t from pandas._libs.missing cimport checknull diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index da91fa69b0dec..f7001c165870e 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -5,6 +5,35 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in """ +{{py: + +# name +cimported_types = ['float32', + 'float64', + 'int8', + 'int16', + 'int32', + 'int64', + 'pymap', + 'str', + 'strbox', + 'uint8', + 'uint16', + 'uint32', + 'uint64'] +}} + +{{for 
name in cimported_types}} +from pandas._libs.khash cimport ( + kh_destroy_{{name}}, + kh_exist_{{name}}, + kh_get_{{name}}, + kh_init_{{name}}, + kh_put_{{name}}, + kh_resize_{{name}}, +) +{{endfor}} + # ---------------------------------------------------------------------- # VectorData # ---------------------------------------------------------------------- @@ -20,9 +49,16 @@ from pandas._libs.missing cimport C_NA # for uniques in hashtables) dtypes = [('Float64', 'float64', 'float64_t'), + ('Float32', 'float32', 'float32_t'), ('Int64', 'int64', 'int64_t'), + ('Int32', 'int32', 'int32_t'), + ('Int16', 'int16', 'int16_t'), + ('Int8', 'int8', 'int8_t'), ('String', 'string', 'char *'), - ('UInt64', 'uint64', 'uint64_t')] + ('UInt64', 'uint64', 'uint64_t'), + ('UInt32', 'uint32', 'uint32_t'), + ('UInt16', 'uint16', 'uint16_t'), + ('UInt8', 'uint8', 'uint8_t')] }} {{for name, dtype, c_type in dtypes}} @@ -49,8 +85,15 @@ cdef inline void append_data_{{dtype}}({{name}}VectorData *data, ctypedef fused vector_data: Int64VectorData + Int32VectorData + Int16VectorData + Int8VectorData UInt64VectorData + UInt32VectorData + UInt16VectorData + UInt8VectorData Float64VectorData + Float32VectorData StringVectorData cdef inline bint needs_resize(vector_data *data) nogil: @@ -65,7 +108,14 @@ cdef inline bint needs_resize(vector_data *data) nogil: # name, dtype, c_type dtypes = [('Float64', 'float64', 'float64_t'), ('UInt64', 'uint64', 'uint64_t'), - ('Int64', 'int64', 'int64_t')] + ('Int64', 'int64', 'int64_t'), + ('Float32', 'float32', 'float32_t'), + ('UInt32', 'uint32', 'uint32_t'), + ('Int32', 'int32', 'int32_t'), + ('UInt16', 'uint16', 'uint16_t'), + ('Int16', 'int16', 'int16_t'), + ('UInt8', 'uint8', 'uint8_t'), + ('Int8', 'int8', 'int8_t')] }} @@ -253,15 +303,22 @@ cdef class HashTable: {{py: -# name, dtype, float_group, default_na_value -dtypes = [('Float64', 'float64', True, 'np.nan'), - ('UInt64', 'uint64', False, 0), - ('Int64', 'int64', False, 'NPY_NAT')] +# name, dtype, float_group +dtypes = [('Float64', 'float64', True), + ('UInt64', 'uint64', False), + ('Int64', 'int64', False), + ('Float32', 'float32', True), + ('UInt32', 'uint32', False), + ('Int32', 'int32', False), + ('UInt16', 'uint16', False), + ('Int16', 'int16', False), + ('UInt8', 'uint8', False), + ('Int8', 'int8', False)] }} -{{for name, dtype, float_group, default_na_value in dtypes}} +{{for name, dtype, float_group in dtypes}} cdef class {{name}}HashTable(HashTable): @@ -430,7 +487,7 @@ cdef class {{name}}HashTable(HashTable): # which is only used if it's *specified*. 
na_value2 = <{{dtype}}_t>na_value else: - na_value2 = {{default_na_value}} + na_value2 = 0 with nogil: for i in range(n): diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in index 4a466ada765ca..7c5afa4ff6b27 100644 --- a/pandas/_libs/hashtable_func_helper.pxi.in +++ b/pandas/_libs/hashtable_func_helper.pxi.in @@ -8,9 +8,16 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in # dtype, ttype, c_type dtypes = [('float64', 'float64', 'float64_t'), + ('float32', 'float32', 'float32_t'), ('uint64', 'uint64', 'uint64_t'), + ('uint32', 'uint32', 'uint32_t'), + ('uint16', 'uint16', 'uint16_t'), + ('uint8', 'uint8', 'uint8_t'), ('object', 'pymap', 'object'), - ('int64', 'int64', 'int64_t')] + ('int64', 'int64', 'int64_t'), + ('int32', 'int32', 'int32_t'), + ('int16', 'int16', 'int16_t'), + ('int8', 'int8', 'int8_t')] }} @@ -54,7 +61,7 @@ cdef build_count_table_{{dtype}}({{dtype}}_t[:] values, for i in range(n): val = values[i] - {{if dtype == 'float64'}} + {{if dtype == 'float64' or dtype == 'float32'}} if val == val or not dropna: {{else}} if True: @@ -275,8 +282,15 @@ def ismember_{{dtype}}(const {{c_type}}[:] arr, const {{c_type}}[:] values): # dtype, ctype, table_type, npy_dtype dtypes = [('float64', 'float64_t', 'float64', 'float64'), + ('float32', 'float32_t', 'float32', 'float32'), ('int64', 'int64_t', 'int64', 'int64'), + ('int32', 'int32_t', 'int32', 'int32'), + ('int16', 'int16_t', 'int16', 'int16'), + ('int8', 'int8_t', 'int8', 'int8'), ('uint64', 'uint64_t', 'uint64', 'uint64'), + ('uint32', 'uint32_t', 'uint32', 'uint32'), + ('uint16', 'uint16_t', 'uint16', 'uint16'), + ('uint8', 'uint8_t', 'uint8', 'uint8'), ('object', 'object', 'pymap', 'object_')] }} diff --git a/pandas/_libs/khash.pxd b/pandas/_libs/khash.pxd index 1bb3a158b4b1a..8b082747bf22b 100644 --- a/pandas/_libs/khash.pxd +++ b/pandas/_libs/khash.pxd @@ -1,5 +1,16 @@ from cpython.object cimport PyObject -from numpy cimport float64_t, int32_t, int64_t, uint32_t, uint64_t +from numpy cimport ( + float32_t, + float64_t, + int8_t, + int16_t, + int32_t, + int64_t, + uint8_t, + uint16_t, + uint32_t, + uint64_t, +) cdef extern from "khash_python.h": @@ -67,72 +78,6 @@ cdef extern from "khash_python.h": void kh_destroy_str_starts(kh_str_starts_t*) nogil void kh_resize_str_starts(kh_str_starts_t*, khint_t) nogil - ctypedef struct kh_int64_t: - khint_t n_buckets, size, n_occupied, upper_bound - uint32_t *flags - int64_t *keys - size_t *vals - - kh_int64_t* kh_init_int64() nogil - void kh_destroy_int64(kh_int64_t*) nogil - void kh_clear_int64(kh_int64_t*) nogil - khint_t kh_get_int64(kh_int64_t*, int64_t) nogil - void kh_resize_int64(kh_int64_t*, khint_t) nogil - khint_t kh_put_int64(kh_int64_t*, int64_t, int*) nogil - void kh_del_int64(kh_int64_t*, khint_t) nogil - - bint kh_exist_int64(kh_int64_t*, khiter_t) nogil - - ctypedef uint64_t khuint64_t - - ctypedef struct kh_uint64_t: - khint_t n_buckets, size, n_occupied, upper_bound - uint32_t *flags - khuint64_t *keys - size_t *vals - - kh_uint64_t* kh_init_uint64() nogil - void kh_destroy_uint64(kh_uint64_t*) nogil - void kh_clear_uint64(kh_uint64_t*) nogil - khint_t kh_get_uint64(kh_uint64_t*, uint64_t) nogil - void kh_resize_uint64(kh_uint64_t*, khint_t) nogil - khint_t kh_put_uint64(kh_uint64_t*, uint64_t, int*) nogil - void kh_del_uint64(kh_uint64_t*, khint_t) nogil - - bint kh_exist_uint64(kh_uint64_t*, khiter_t) nogil - - ctypedef struct kh_float64_t: - khint_t n_buckets, size, n_occupied, upper_bound - uint32_t 
*flags - float64_t *keys - size_t *vals - - kh_float64_t* kh_init_float64() nogil - void kh_destroy_float64(kh_float64_t*) nogil - void kh_clear_float64(kh_float64_t*) nogil - khint_t kh_get_float64(kh_float64_t*, float64_t) nogil - void kh_resize_float64(kh_float64_t*, khint_t) nogil - khint_t kh_put_float64(kh_float64_t*, float64_t, int*) nogil - void kh_del_float64(kh_float64_t*, khint_t) nogil - - bint kh_exist_float64(kh_float64_t*, khiter_t) nogil - - ctypedef struct kh_int32_t: - khint_t n_buckets, size, n_occupied, upper_bound - uint32_t *flags - int32_t *keys - size_t *vals - - kh_int32_t* kh_init_int32() nogil - void kh_destroy_int32(kh_int32_t*) nogil - void kh_clear_int32(kh_int32_t*) nogil - khint_t kh_get_int32(kh_int32_t*, int32_t) nogil - void kh_resize_int32(kh_int32_t*, khint_t) nogil - khint_t kh_put_int32(kh_int32_t*, int32_t, int*) nogil - void kh_del_int32(kh_int32_t*, khint_t) nogil - - bint kh_exist_int32(kh_int32_t*, khiter_t) nogil - # sweep factorize ctypedef struct kh_strbox_t: @@ -150,3 +95,5 @@ cdef extern from "khash_python.h": void kh_del_strbox(kh_strbox_t*, khint_t) nogil bint kh_exist_strbox(kh_strbox_t*, khiter_t) nogil + +include "khash_for_primitive_helper.pxi" diff --git a/pandas/_libs/khash_for_primitive_helper.pxi.in b/pandas/_libs/khash_for_primitive_helper.pxi.in new file mode 100644 index 0000000000000..db8d3e0b19417 --- /dev/null +++ b/pandas/_libs/khash_for_primitive_helper.pxi.in @@ -0,0 +1,42 @@ +""" +Template for wrapping khash-tables for each primitive `dtype` + +WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in +""" + +{{py: + +# name, c_type +primitive_types = [('int64', 'int64_t'), + ('uint64', 'uint64_t'), + ('float64', 'float64_t'), + ('int32', 'int32_t'), + ('uint32', 'uint32_t'), + ('float32', 'float32_t'), + ('int16', 'int16_t'), + ('uint16', 'uint16_t'), + ('int8', 'int8_t'), + ('uint8', 'uint8_t'), + ] +}} + +{{for name, c_type in primitive_types}} + +cdef extern from "khash_python.h": + ctypedef struct kh_{{name}}_t: + khint_t n_buckets, size, n_occupied, upper_bound + uint32_t *flags + {{c_type}} *keys + size_t *vals + + kh_{{name}}_t* kh_init_{{name}}() nogil + void kh_destroy_{{name}}(kh_{{name}}_t*) nogil + void kh_clear_{{name}}(kh_{{name}}_t*) nogil + khint_t kh_get_{{name}}(kh_{{name}}_t*, {{c_type}}) nogil + void kh_resize_{{name}}(kh_{{name}}_t*, khint_t) nogil + khint_t kh_put_{{name}}(kh_{{name}}_t*, {{c_type}}, int*) nogil + void kh_del_{{name}}(kh_{{name}}_t*, khint_t) nogil + + bint kh_exist_{{name}}(kh_{{name}}_t*, khiter_t) nogil + +{{endfor}} diff --git a/pandas/_libs/src/klib/khash.h b/pandas/_libs/src/klib/khash.h index 61a4e80ea8cbc..ecd15d1893c23 100644 --- a/pandas/_libs/src/klib/khash.h +++ b/pandas/_libs/src/klib/khash.h @@ -122,14 +122,23 @@ typedef unsigned long khint32_t; #endif #if ULONG_MAX == ULLONG_MAX -typedef unsigned long khuint64_t; -typedef signed long khint64_t; +typedef unsigned long khint64_t; #else -typedef unsigned long long khuint64_t; -typedef signed long long khint64_t; +typedef unsigned long long khint64_t; +#endif + +#if UINT_MAX == 0xffffu +typedef unsigned int khint16_t; +#elif USHRT_MAX == 0xffffu +typedef unsigned short khint16_t; +#endif + +#if UCHAR_MAX == 0xffu +typedef unsigned char khint8_t; #endif typedef double khfloat64_t; +typedef float khfloat32_t; typedef khint32_t khint_t; typedef khint_t khiter_t. @@ -588,15 +597,25 @@ PANDAS_INLINE khint_t __ac_Wang_hash(khint_t key) @param name Name of the hash table [symbol] @param khval_t Type of values 
[type] */ + +// We implicitly convert signed int to unsigned int, thus potential overflows +// for operations (<<,*,+) don't trigger undefined behavior; also, the >>-operator +// is implementation-defined for signed ints if the sign bit is set. +// Because we never really "get" the keys, there will be no conversion from +// unsigned int to (signed) int (which would be implementation-defined behavior). +// This holds also for 64-, 16- and 8-bit integers. #define KHASH_MAP_INIT_INT(name, khval_t) \ KHASH_INIT(name, khint32_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal) +#define KHASH_MAP_INIT_UINT(name, khval_t) \ + KHASH_INIT(name, khint32_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal) + /*! @function @abstract Instantiate a hash map containing 64-bit integer keys @param name Name of the hash table [symbol] */ #define KHASH_SET_INIT_UINT64(name) \ - KHASH_INIT(name, khuint64_t, char, 0, kh_int64_hash_func, kh_int64_hash_equal) + KHASH_INIT(name, khint64_t, char, 0, kh_int64_hash_func, kh_int64_hash_equal) #define KHASH_SET_INIT_INT64(name) \ KHASH_INIT(name, khint64_t, char, 0, kh_int64_hash_func, kh_int64_hash_equal) @@ -607,11 +626,34 @@ PANDAS_INLINE khint_t __ac_Wang_hash(khint_t key) @param khval_t Type of values [type] */ #define KHASH_MAP_INIT_UINT64(name, khval_t) \ - KHASH_INIT(name, khuint64_t, khval_t, 1, kh_int64_hash_func, kh_int64_hash_equal) + KHASH_INIT(name, khint64_t, khval_t, 1, kh_int64_hash_func, kh_int64_hash_equal) #define KHASH_MAP_INIT_INT64(name, khval_t) \ KHASH_INIT(name, khint64_t, khval_t, 1, kh_int64_hash_func, kh_int64_hash_equal) +/*! @function + @abstract Instantiate a hash map containing 16-bit integer keys + @param name Name of the hash table [symbol] + @param khval_t Type of values [type] + */ +#define KHASH_MAP_INIT_INT16(name, khval_t) \ + KHASH_INIT(name, khint16_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal) + +#define KHASH_MAP_INIT_UINT16(name, khval_t) \ + KHASH_INIT(name, khint16_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal) + +/*! @function + @abstract Instantiate a hash map containing 8-bit integer keys + @param name Name of the hash table [symbol] + @param khval_t Type of values [type] + */ +#define KHASH_MAP_INIT_INT8(name, khval_t) \ + KHASH_INIT(name, khint8_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal) + +#define KHASH_MAP_INIT_UINT8(name, khval_t) \ + KHASH_INIT(name, khint8_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal) + + typedef const char *kh_cstr_t; /*! 
@function @@ -634,12 +676,23 @@ typedef const char *kh_cstr_t; #define kh_exist_float64(h, k) (kh_exist(h, k)) #define kh_exist_uint64(h, k) (kh_exist(h, k)) #define kh_exist_int64(h, k) (kh_exist(h, k)) +#define kh_exist_float32(h, k) (kh_exist(h, k)) #define kh_exist_int32(h, k) (kh_exist(h, k)) +#define kh_exist_uint32(h, k) (kh_exist(h, k)) +#define kh_exist_int16(h, k) (kh_exist(h, k)) +#define kh_exist_uint16(h, k) (kh_exist(h, k)) +#define kh_exist_int8(h, k) (kh_exist(h, k)) +#define kh_exist_uint8(h, k) (kh_exist(h, k)) KHASH_MAP_INIT_STR(str, size_t) KHASH_MAP_INIT_INT(int32, size_t) +KHASH_MAP_INIT_UINT(uint32, size_t) KHASH_MAP_INIT_INT64(int64, size_t) KHASH_MAP_INIT_UINT64(uint64, size_t) +KHASH_MAP_INIT_INT16(int16, size_t) +KHASH_MAP_INIT_UINT16(uint16, size_t) +KHASH_MAP_INIT_INT8(int8, size_t) +KHASH_MAP_INIT_UINT8(uint8, size_t) #endif /* __AC_KHASH_H */ diff --git a/pandas/_libs/src/klib/khash_python.h b/pandas/_libs/src/klib/khash_python.h index aebc229abddd2..c37f0e950baa7 100644 --- a/pandas/_libs/src/klib/khash_python.h +++ b/pandas/_libs/src/klib/khash_python.h @@ -23,6 +23,12 @@ khint64_t PANDAS_INLINE asint64(double key) { return val; } +khint32_t PANDAS_INLINE asint32(float key) { + khint32_t val; + memcpy(&val, &key, sizeof(float)); + return val; +} + #define ZERO_HASH 0 #define NAN_HASH 0 @@ -39,13 +45,31 @@ khint32_t PANDAS_INLINE kh_float64_hash_func(double val){ return murmur2_64to32(as_int); } -#define kh_float64_hash_equal(a, b) ((a) == (b) || ((b) != (b) && (a) != (a))) +khint32_t PANDAS_INLINE kh_float32_hash_func(float val){ + // 0.0 and -0.0 should have the same hash: + if (val == 0.0f){ + return ZERO_HASH; + } + // all nans should have the same hash: + if ( val!=val ){ + return NAN_HASH; + } + khint32_t as_int = asint32(val); + return murmur2_32to32(as_int); +} + +#define kh_floats_hash_equal(a, b) ((a) == (b) || ((b) != (b) && (a) != (a))) #define KHASH_MAP_INIT_FLOAT64(name, khval_t) \ - KHASH_INIT(name, khfloat64_t, khval_t, 1, kh_float64_hash_func, kh_float64_hash_equal) + KHASH_INIT(name, khfloat64_t, khval_t, 1, kh_float64_hash_func, kh_floats_hash_equal) KHASH_MAP_INIT_FLOAT64(float64, size_t) +#define KHASH_MAP_INIT_FLOAT32(name, khval_t) \ + KHASH_INIT(name, khfloat32_t, khval_t, 1, kh_float32_hash_func, kh_floats_hash_equal) + +KHASH_MAP_INIT_FLOAT32(float32, size_t) + int PANDAS_INLINE pyobject_cmp(PyObject* a, PyObject* b) { int result = PyObject_RichCompareBool(a, b, Py_EQ); diff --git a/pandas/tests/libs/test_hashtable.py b/pandas/tests/libs/test_hashtable.py new file mode 100644 index 0000000000000..5ef110e9672f0 --- /dev/null +++ b/pandas/tests/libs/test_hashtable.py @@ -0,0 +1,265 @@ +import numpy as np +import pytest + +from pandas._libs import hashtable as ht + +import pandas._testing as tm + + +@pytest.mark.parametrize( + "table_type, dtype", + [ + (ht.Int64HashTable, np.int64), + (ht.UInt64HashTable, np.uint64), + (ht.Float64HashTable, np.float64), + (ht.Int32HashTable, np.int32), + (ht.UInt32HashTable, np.uint32), + (ht.Float32HashTable, np.float32), + (ht.Int16HashTable, np.int16), + (ht.UInt16HashTable, np.uint16), + (ht.Int8HashTable, np.int8), + (ht.UInt8HashTable, np.uint8), + ], +) +class TestHashTable: + def test_get_set_contains_len(self, table_type, dtype): + index = 5 + table = table_type(55) + assert len(table) == 0 + assert index not in table + + table.set_item(index, 42) + assert len(table) == 1 + assert index in table + assert table.get_item(index) == 42 + + table.set_item(index + 1, 41) + assert index in table + 
assert index + 1 in table + assert len(table) == 2 + assert table.get_item(index) == 42 + assert table.get_item(index + 1) == 41 + + table.set_item(index, 21) + assert index in table + assert index + 1 in table + assert len(table) == 2 + assert table.get_item(index) == 21 + assert table.get_item(index + 1) == 41 + assert index + 2 not in table + + with pytest.raises(KeyError) as excinfo: + table.get_item(index + 2) + assert str(index + 2) in str(excinfo.value) + + def test_map(self, table_type, dtype): + N = 77 + table = table_type() + keys = np.arange(N).astype(dtype) + vals = np.arange(N).astype(np.int64) + N + table.map(keys, vals) + for i in range(N): + assert table.get_item(keys[i]) == i + N + + def test_map_locations(self, table_type, dtype): + N = 8 + table = table_type() + keys = (np.arange(N) + N).astype(dtype) + table.map_locations(keys) + for i in range(N): + assert table.get_item(keys[i]) == i + + def test_lookup(self, table_type, dtype): + N = 3 + table = table_type() + keys = (np.arange(N) + N).astype(dtype) + table.map_locations(keys) + result = table.lookup(keys) + expected = np.arange(N) + tm.assert_numpy_array_equal(result.astype(np.int64), expected.astype(np.int64)) + + def test_lookup_wrong(self, table_type, dtype): + if dtype in (np.int8, np.uint8): + N = 100 + else: + N = 512 + table = table_type() + keys = (np.arange(N) + N).astype(dtype) + table.map_locations(keys) + wrong_keys = np.arange(N).astype(dtype) + result = table.lookup(wrong_keys) + assert np.all(result == -1) + + def test_unique(self, table_type, dtype): + if dtype in (np.int8, np.uint8): + N = 88 + else: + N = 1000 + table = table_type() + expected = (np.arange(N) + N).astype(dtype) + keys = np.repeat(expected, 5) + unique = table.unique(keys) + tm.assert_numpy_array_equal(unique, expected) + + +@pytest.mark.parametrize( + "table_type, dtype", + [ + (ht.Float64HashTable, np.float64), + (ht.Float32HashTable, np.float32), + ], +) +class TestHashTableWithNans: + def test_get_set_contains_len(self, table_type, dtype): + index = float("nan") + table = table_type() + assert index not in table + + table.set_item(index, 42) + assert len(table) == 1 + assert index in table + assert table.get_item(index) == 42 + + table.set_item(index, 41) + assert len(table) == 1 + assert index in table + assert table.get_item(index) == 41 + + def test_map(self, table_type, dtype): + N = 332 + table = table_type() + keys = np.full(N, np.nan, dtype=dtype) + vals = (np.arange(N) + N).astype(np.int64) + table.map(keys, vals) + assert len(table) == 1 + assert table.get_item(np.nan) == 2 * N - 1 + + def test_map_locations(self, table_type, dtype): + N = 10 + table = table_type() + keys = np.full(N, np.nan, dtype=dtype) + table.map_locations(keys) + assert len(table) == 1 + assert table.get_item(np.nan) == N - 1 + + def test_unique(self, table_type, dtype): + N = 1020 + table = table_type() + keys = np.full(N, np.nan, dtype=dtype) + unique = table.unique(keys) + assert np.all(np.isnan(unique)) and len(unique) == 1 + + +def get_ht_function(fun_name, type_suffix): + return getattr(ht, fun_name + "_" + type_suffix) + + +@pytest.mark.parametrize( + "dtype, type_suffix", + [ + (np.int64, "int64"), + (np.uint64, "uint64"), + (np.float64, "float64"), + (np.int32, "int32"), + (np.uint32, "uint32"), + (np.float32, "float32"), + (np.int16, "int16"), + (np.uint16, "uint16"), + (np.int8, "int8"), + (np.uint8, "uint8"), + ], +) +class TestHelpFunctions: + def test_value_count(self, dtype, type_suffix): + N = 43 + value_count = 
get_ht_function("value_count", type_suffix) + expected = (np.arange(N) + N).astype(dtype) + values = np.repeat(expected, 5) + keys, counts = value_count(values, False) + tm.assert_numpy_array_equal(np.sort(keys), expected) + assert np.all(counts == 5) + + def test_duplicated_first(self, dtype, type_suffix): + N = 100 + duplicated = get_ht_function("duplicated", type_suffix) + values = np.repeat(np.arange(N).astype(dtype), 5) + result = duplicated(values) + expected = np.ones_like(values, dtype=np.bool_) + expected[::5] = False + tm.assert_numpy_array_equal(result, expected) + + def test_ismember_yes(self, dtype, type_suffix): + N = 127 + ismember = get_ht_function("ismember", type_suffix) + arr = np.arange(N).astype(dtype) + values = np.arange(N).astype(dtype) + result = ismember(arr, values) + expected = np.ones_like(values, dtype=np.bool_) + tm.assert_numpy_array_equal(result, expected) + + def test_ismember_no(self, dtype, type_suffix): + N = 17 + ismember = get_ht_function("ismember", type_suffix) + arr = np.arange(N).astype(dtype) + values = (np.arange(N) + N).astype(dtype) + result = ismember(arr, values) + expected = np.zeros_like(values, dtype=np.bool_) + tm.assert_numpy_array_equal(result, expected) + + def test_mode(self, dtype, type_suffix): + if dtype in (np.int8, np.uint8): + N = 53 + else: + N = 11111 + mode = get_ht_function("mode", type_suffix) + values = np.repeat(np.arange(N).astype(dtype), 5) + values[0] = 42 + result = mode(values, False) + assert result == 42 + + +@pytest.mark.parametrize( + "dtype, type_suffix", + [ + (np.float64, "float64"), + (np.float32, "float32"), + ], +) +class TestHelpFunctionsWithNans: + def test_value_count(self, dtype, type_suffix): + value_count = get_ht_function("value_count", type_suffix) + values = np.array([np.nan, np.nan, np.nan], dtype=dtype) + keys, counts = value_count(values, True) + assert len(keys) == 0 + keys, counts = value_count(values, False) + assert len(keys) == 1 and np.all(np.isnan(keys)) + assert counts[0] == 3 + + def test_duplicated_first(self, dtype, type_suffix): + duplicated = get_ht_function("duplicated", type_suffix) + values = np.array([np.nan, np.nan, np.nan], dtype=dtype) + result = duplicated(values) + expected = np.array([False, True, True]) + tm.assert_numpy_array_equal(result, expected) + + def test_ismember_yes(self, dtype, type_suffix): + ismember = get_ht_function("ismember", type_suffix) + arr = np.array([np.nan, np.nan, np.nan], dtype=dtype) + values = np.array([np.nan, np.nan], dtype=dtype) + result = ismember(arr, values) + expected = np.array([True, True, True], dtype=np.bool_) + tm.assert_numpy_array_equal(result, expected) + + def test_ismember_no(self, dtype, type_suffix): + ismember = get_ht_function("ismember", type_suffix) + arr = np.array([np.nan, np.nan, np.nan], dtype=dtype) + values = np.array([1], dtype=dtype) + result = ismember(arr, values) + expected = np.array([False, False, False], dtype=np.bool_) + tm.assert_numpy_array_equal(result, expected) + + def test_mode(self, dtype, type_suffix): + mode = get_ht_function("mode", type_suffix) + values = np.array([42, np.nan, np.nan, np.nan], dtype=dtype) + assert mode(values, True) == 42 + assert np.isnan(mode(values, False)) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 34b7d0e73e914..3b6f5d145b500 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1517,6 +1517,7 @@ def test_get_unique(self): (ht.StringHashTable, ht.ObjectVector, "object", True), (ht.Float64HashTable, ht.Float64Vector, 
"float64", False), (ht.Int64HashTable, ht.Int64Vector, "int64", False), + (ht.Int32HashTable, ht.Int32Vector, "int32", False), (ht.UInt64HashTable, ht.UInt64Vector, "uint64", False), ], ) @@ -1640,6 +1641,7 @@ def test_hashtable_factorize(self, htable, tm_dtype, writable): ht.StringHashTable, ht.Float64HashTable, ht.Int64HashTable, + ht.Int32HashTable, ht.UInt64HashTable, ], ) diff --git a/setup.py b/setup.py index 78a789c808efb..9f33c045df6ed 100755 --- a/setup.py +++ b/setup.py @@ -53,6 +53,7 @@ def is_platform_mac(): "hashtable": [ "_libs/hashtable_class_helper.pxi.in", "_libs/hashtable_func_helper.pxi.in", + "_libs/khash_for_primitive_helper.pxi.in", ], "index": ["_libs/index_class_helper.pxi.in"], "sparse": ["_libs/sparse_op_helper.pxi.in"], @@ -525,7 +526,10 @@ def srcpath(name=None, suffix=".pyx", subdir="src"): "_libs.hashtable": { "pyxfile": "_libs/hashtable", "include": klib_include, - "depends": (["pandas/_libs/src/klib/khash_python.h"] + _pxi_dep["hashtable"]), + "depends": ( + ["pandas/_libs/src/klib/khash_python.h", "pandas/_libs/src/klib/khash.h"] + + _pxi_dep["hashtable"] + ), }, "_libs.index": { "pyxfile": "_libs/index",
- [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Starts work on #33287. Proof of concept for 32-bit/16-bit hash tables. If it works, it should become the blueprint for the UInt32/Int16/UInt16/Float32 hash tables.
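A sketch of how the new tables can be exercised, mirroring the tests added in the diff (this is an internal API and may change):

```python
import numpy as np
from pandas._libs import hashtable as ht

table = ht.Int32HashTable()
keys = np.arange(5, dtype=np.int32)
table.map_locations(keys)                       # key -> position in `keys`
table.get_item(3)                               # 3
table.lookup(np.array([2, 7], dtype=np.int32))  # array([ 2, -1]); 7 is absent
```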
https://api.github.com/repos/pandas-dev/pandas/pulls/37920
2020-11-17T22:35:22Z
2020-11-21T22:22:24Z
2020-11-21T22:22:24Z
2020-11-21T22:59:07Z
REF: de-duplicate pointwise get_indexer for IntervalIndex
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 21264b00b91f8..b0f8be986fe5d 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -732,17 +732,7 @@ def get_indexer( indexer = self._engine.get_indexer(target_as_index.values) else: # heterogeneous scalar index: defer elementwise to get_loc - # (non-overlapping so get_loc guarantees scalar of KeyError) - indexer = [] - for key in target_as_index: - try: - loc = self.get_loc(key) - except KeyError: - loc = -1 - except InvalidIndexError as err: - # i.e. non-scalar key - raise TypeError(key) from err - indexer.append(loc) + return self._get_indexer_pointwise(target_as_index)[0] return ensure_platform_int(indexer) @@ -766,18 +756,8 @@ def get_indexer_non_unique( target_as_index, IntervalIndex ): # target_as_index might contain intervals: defer elementwise to get_loc - indexer, missing = [], [] - for i, key in enumerate(target_as_index): - try: - locs = self.get_loc(key) - if isinstance(locs, slice): - locs = np.arange(locs.start, locs.stop, locs.step, dtype="intp") - locs = np.array(locs, ndmin=1) - except KeyError: - missing.append(i) - locs = np.array([-1]) - indexer.append(locs) - indexer = np.concatenate(indexer) + return self._get_indexer_pointwise(target_as_index) + else: target_as_index = self._maybe_convert_i8(target_as_index) indexer, missing = self._engine.get_indexer_non_unique( @@ -786,6 +766,30 @@ def get_indexer_non_unique( return ensure_platform_int(indexer), ensure_platform_int(missing) + def _get_indexer_pointwise(self, target: Index) -> Tuple[np.ndarray, np.ndarray]: + """ + pointwise implementation for get_indexer and get_indexer_non_unique. + """ + indexer, missing = [], [] + for i, key in enumerate(target): + try: + locs = self.get_loc(key) + if isinstance(locs, slice): + # Only needed for get_indexer_non_unique + locs = np.arange(locs.start, locs.stop, locs.step, dtype="intp") + locs = np.array(locs, ndmin=1) + except KeyError: + missing.append(i) + locs = np.array([-1]) + except InvalidIndexError as err: + # i.e. non-scalar key + raise TypeError(key) from err + + indexer.append(locs) + + indexer = np.concatenate(indexer) + return ensure_platform_int(indexer), ensure_platform_int(missing) + @property def _index_as_unique(self): return not self.is_overlapping
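Editorial sketch of the pointwise path being consolidated here (not part of the patch): an object-dtype target containing intervals defers to `get_loc` element by element.

```python
import pandas as pd

ii = pd.IntervalIndex.from_breaks([0, 1, 2])  # [(0, 1], (1, 2]]
target = pd.Index([pd.Interval(0, 1), 5], dtype=object)
indexer, missing = ii.get_indexer_non_unique(target)
# indexer -> array([ 0, -1]); missing -> array([1]), the position of 5
```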
https://api.github.com/repos/pandas-dev/pandas/pulls/37919
2020-11-17T22:19:56Z
2020-11-18T13:23:24Z
2020-11-18T13:23:24Z
2020-11-18T16:10:19Z
Deprecate inplace in Categorical.remove_unused_categories
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 62da3c0c5cddc..004db48c53f7d 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -458,6 +458,7 @@ Deprecations - :meth:`Categorical.is_dtype_equal` and :meth:`CategoricalIndex.is_dtype_equal` are deprecated, will be removed in a future version (:issue:`37545`) - :meth:`Series.slice_shift` and :meth:`DataFrame.slice_shift` are deprecated, use :meth:`Series.shift` or :meth:`DataFrame.shift` instead (:issue:`37601`) - Partial slicing on unordered :class:`DatetimeIndexes` with keys, which are not in Index is deprecated and will be removed in a future version (:issue:`18531`) +- The ``inplace`` parameter of :meth:`Categorical.remove_unused_categories` is deprecated and will be removed in a future version (:issue:`37643`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 3dec27d4af1ce..e57ef17272a9d 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -10,6 +10,7 @@ from pandas._config import get_option from pandas._libs import NaT, algos as libalgos, hashtable as htable +from pandas._libs.lib import no_default from pandas._typing import ArrayLike, Dtype, Ordered, Scalar from pandas.compat.numpy import function as nv from pandas.util._decorators import cache_readonly, deprecate_kwarg @@ -1046,7 +1047,7 @@ def remove_categories(self, removals, inplace=False): new_categories, ordered=self.ordered, rename=False, inplace=inplace ) - def remove_unused_categories(self, inplace=False): + def remove_unused_categories(self, inplace=no_default): """ Remove categories which are not used. @@ -1056,6 +1057,8 @@ def remove_unused_categories(self, inplace=False): Whether or not to drop unused categories inplace or return a copy of this categorical with unused categories dropped. + .. deprecated:: 1.2.0 + Returns ------- cat : Categorical or None @@ -1069,6 +1072,17 @@ def remove_unused_categories(self, inplace=False): remove_categories : Remove the specified categories. set_categories : Set the categories to the specified ones. """ + if inplace is not no_default: + warn( + "The `inplace` parameter in pandas.Categorical." 
+ "remove_unused_categories is deprecated and " + "will be removed in a future version.", + FutureWarning, + stacklevel=2, + ) + else: + inplace = False + inplace = validate_bool_kwarg(inplace, "inplace") cat = self if inplace else self.copy() idx, inv = np.unique(cat._codes, return_inverse=True) diff --git a/pandas/tests/arrays/categorical/test_analytics.py b/pandas/tests/arrays/categorical/test_analytics.py index 98dcdd1692117..7bd7d29ec9703 100644 --- a/pandas/tests/arrays/categorical/test_analytics.py +++ b/pandas/tests/arrays/categorical/test_analytics.py @@ -355,7 +355,9 @@ def test_validate_inplace_raises(self, value): cat.remove_categories(removals=["D", "E", "F"], inplace=value) with pytest.raises(ValueError, match=msg): - cat.remove_unused_categories(inplace=value) + with tm.assert_produces_warning(FutureWarning): + # issue #37643 inplace kwarg deprecated + cat.remove_unused_categories(inplace=value) with pytest.raises(ValueError, match=msg): cat.sort_values(inplace=value) diff --git a/pandas/tests/arrays/categorical/test_api.py b/pandas/tests/arrays/categorical/test_api.py index 6fce4b4145ff2..98b0f978c5f59 100644 --- a/pandas/tests/arrays/categorical/test_api.py +++ b/pandas/tests/arrays/categorical/test_api.py @@ -371,7 +371,10 @@ def test_remove_unused_categories(self): tm.assert_index_equal(res.categories, exp_categories_dropped) tm.assert_index_equal(c.categories, exp_categories_all) - res = c.remove_unused_categories(inplace=True) + with tm.assert_produces_warning(FutureWarning): + # issue #37643 inplace kwarg deprecated + res = c.remove_unused_categories(inplace=True) + tm.assert_index_equal(c.categories, exp_categories_dropped) assert res is None diff --git a/pandas/tests/series/accessors/test_cat_accessor.py b/pandas/tests/series/accessors/test_cat_accessor.py index f561ac82a8901..8a4c4d56e264d 100644 --- a/pandas/tests/series/accessors/test_cat_accessor.py +++ b/pandas/tests/series/accessors/test_cat_accessor.py @@ -81,7 +81,10 @@ def test_cat_accessor_updates_on_inplace(self): ser = Series(list("abc")).astype("category") return_value = ser.drop(0, inplace=True) assert return_value is None - return_value = ser.cat.remove_unused_categories(inplace=True) + + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + return_value = ser.cat.remove_unused_categories(inplace=True) + assert return_value is None assert len(ser.cat.categories) == 2
- [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Related to #37643
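A minimal sketch of the deprecation in action (illustrative, not part of the patch):

```python
import pandas as pd

cat = pd.Categorical(["a", "b", "a"], categories=["a", "b", "c"])
cat.remove_unused_categories(inplace=True)  # FutureWarning after this PR
cat = cat.remove_unused_categories()        # preferred: reassign the result
```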
https://api.github.com/repos/pandas-dev/pandas/pulls/37918
2020-11-17T21:29:28Z
2020-11-18T13:31:50Z
2020-11-18T13:31:50Z
2020-11-18T13:32:02Z
REF: ensure CategoricalIndex._shallow_copy only ever gets Categorical
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 9ea0ff323a33d..1b3e4864843f3 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2734,7 +2734,7 @@ def _union(self, other, sort): stacklevel=3, ) - return self._shallow_copy(result) + return result @final def _wrap_setop_result(self, other, result): @@ -2742,6 +2742,8 @@ def _wrap_setop_result(self, other, result): result, np.ndarray ): result = type(self._data)._simple_new(result, dtype=self.dtype) + elif is_categorical_dtype(self.dtype) and isinstance(result, np.ndarray): + result = Categorical(result, dtype=self.dtype) name = get_op_result_name(self, other) if isinstance(result, Index): diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index d11d4952ff1c7..2cd18c99fe727 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -1,4 +1,4 @@ -from typing import Any, List +from typing import Any, List, Optional import warnings import numpy as np @@ -227,10 +227,15 @@ def _simple_new(cls, values: Categorical, name: Label = None): # -------------------------------------------------------------------- @doc(Index._shallow_copy) - def _shallow_copy(self, values=None, name: Label = no_default): + def _shallow_copy( + self, values: Optional[Categorical] = None, name: Label = no_default + ): name = self.name if name is no_default else name if values is not None: + # In tests we only get here with Categorical objects that + # have matching .ordered, and values.categories a subset of + # our own. However we do _not_ have a dtype match in general. values = Categorical(values, dtype=self.dtype) return super()._shallow_copy(values=values, name=name) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 88383cb7bbb6a..2983b4b746dcd 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -765,11 +765,7 @@ def intersection(self, other, sort=False): start = right[0] if end < start: - # pandas\core\indexes\datetimelike.py:758: error: Unexpected - # keyword argument "freq" for "DatetimeTimedeltaMixin" [call-arg] - result = type(self)( - data=[], dtype=self.dtype, freq=self.freq # type: ignore[call-arg] - ) + result = self[:0] else: lslice = slice(*left.slice_locs(start, end)) left_chunk = left._values[lslice] diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 4b8207331838e..8e12f84895361 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -29,7 +29,7 @@ from pandas.core.construction import extract_array import pandas.core.indexes.base as ibase from pandas.core.indexes.base import _index_shared_docs, maybe_extract_name -from pandas.core.indexes.numeric import Int64Index +from pandas.core.indexes.numeric import Float64Index, Int64Index from pandas.core.ops.common import unpack_zerodim_and_defer _empty_range = range(0) @@ -397,6 +397,8 @@ def _shallow_copy(self, values=None, name: Label = no_default): name = self.name if name is no_default else name if values is not None: + if values.dtype.kind == "f": + return Float64Index(values, name=name) return Int64Index._simple_new(values, name=name) result = self._simple_new(self._range, name=name) diff --git a/pandas/tests/indexes/base_class/test_setops.py b/pandas/tests/indexes/base_class/test_setops.py index 94c6d2ad6dc95..b571ff7f63f58 100644 --- a/pandas/tests/indexes/base_class/test_setops.py +++ b/pandas/tests/indexes/base_class/test_setops.py @@ -31,7 +31,7 @@ def 
test_setops_preserve_object_dtype(self): result = idx._union(idx[1:], sort=None) expected = idx - tm.assert_index_equal(result, expected) + tm.assert_numpy_array_equal(result, expected.values) result = idx.union(idx[1:], sort=None) tm.assert_index_equal(result, expected) @@ -39,7 +39,7 @@ def test_setops_preserve_object_dtype(self): # if other is not monotonic increasing, _union goes through # a different route result = idx._union(idx[1:][::-1], sort=None) - tm.assert_index_equal(result, expected) + tm.assert_numpy_array_equal(result, expected.values) result = idx.union(idx[1:][::-1], sort=None) tm.assert_index_equal(result, expected)
Still not as tightly restricted as I'd like, but a step in the right direction.
https://api.github.com/repos/pandas-dev/pandas/pulls/37917
2020-11-17T21:14:57Z
2020-11-18T00:38:16Z
2020-11-18T00:38:16Z
2020-11-18T02:59:59Z
REF: de-duplicate IntervalIndex compatibility checks
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index f8bcbcfb158b5..10becdce5d6dd 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -179,7 +179,8 @@ cdef class IntervalMixin: return (self.right == self.left) & (self.closed != 'both') def _check_closed_matches(self, other, name='other'): - """Check if the closed attribute of `other` matches. + """ + Check if the closed attribute of `other` matches. Note that 'left' and 'right' are considered different from 'both'. diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 267d1330faceb..21264b00b91f8 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -130,19 +130,13 @@ def wrapped(self, other, sort=False): if op_name in ("difference",): result = result.astype(self.dtype) return result - elif self.closed != other.closed: - raise ValueError( - "can only do set operations between two IntervalIndex " - "objects that are closed on the same side" - ) - # GH 19016: ensure set op will not return a prohibited dtype - subtypes = [self.dtype.subtype, other.dtype.subtype] - common_subtype = find_common_type(subtypes) - if is_object_dtype(common_subtype): + if self._is_non_comparable_own_type(other): + # GH#19016: ensure set op will not return a prohibited dtype raise TypeError( - f"can only do {op_name} between two IntervalIndex " - "objects that have compatible dtypes" + "can only do set operations between two IntervalIndex " + "objects that are closed on the same side " + "and have compatible dtypes" ) return method(self, other, sort) @@ -717,11 +711,8 @@ def get_indexer( if self.equals(target_as_index): return np.arange(len(self), dtype="intp") - # different closed or incompatible subtype -> no matches - common_subtype = find_common_type( - [self.dtype.subtype, target_as_index.dtype.subtype] - ) - if self.closed != target_as_index.closed or is_object_dtype(common_subtype): + if self._is_non_comparable_own_type(target_as_index): + # different closed or incompatible subtype -> no matches return np.repeat(np.intp(-1), len(target_as_index)) # non-overlapping -> at most one match per interval in target_as_index @@ -763,10 +754,8 @@ def get_indexer_non_unique( # check that target_as_index IntervalIndex is compatible if isinstance(target_as_index, IntervalIndex): - common_subtype = find_common_type( - [self.dtype.subtype, target_as_index.dtype.subtype] - ) - if self.closed != target_as_index.closed or is_object_dtype(common_subtype): + + if self._is_non_comparable_own_type(target_as_index): # different closed or incompatible subtype -> no matches return ( np.repeat(-1, len(target_as_index)), @@ -837,6 +826,16 @@ def _convert_list_indexer(self, keyarr): return locs + def _is_non_comparable_own_type(self, other: "IntervalIndex") -> bool: + # different closed or incompatible subtype -> no matches + + # TODO: once closed is part of IntervalDtype, we can just define + # is_comparable_dtype GH#19371 + if self.closed != other.closed: + return True + common_subtype = find_common_type([self.dtype.subtype, other.dtype.subtype]) + return is_object_dtype(common_subtype) + # -------------------------------------------------------------------- @cache_readonly diff --git a/pandas/tests/indexes/interval/test_setops.py b/pandas/tests/indexes/interval/test_setops.py index 562497b29af12..0b94d70367b4d 100644 --- a/pandas/tests/indexes/interval/test_setops.py +++ b/pandas/tests/indexes/interval/test_setops.py @@ -159,18 +159,18 @@ def test_set_incompatible_types(self, 
closed, op_name, sort): # mixed closed msg = ( "can only do set operations between two IntervalIndex objects " - "that are closed on the same side" + "that are closed on the same side and have compatible dtypes" ) for other_closed in {"right", "left", "both", "neither"} - {closed}: other = monotonic_index(0, 11, closed=other_closed) - with pytest.raises(ValueError, match=msg): + with pytest.raises(TypeError, match=msg): set_op(other, sort=sort) # GH 19016: incompatible dtypes other = interval_range(Timestamp("20180101"), periods=9, closed=closed) msg = ( - f"can only do {op_name} between two IntervalIndex objects that have " - "compatible dtypes" + "can only do set operations between two IntervalIndex objects " + "that are closed on the same side and have compatible dtypes" ) with pytest.raises(TypeError, match=msg): set_op(other, sort=sort)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
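Editorial sketch of the unified error after this change, based on the updated tests (not part of the patch): both mixed-closed and incompatible-subtype set operations now raise TypeError with a single message.

```python
import pandas as pd

left = pd.interval_range(0, 5, closed="right")
right = pd.interval_range(0, 5, closed="left")
left.union(right)
# TypeError: can only do set operations between two IntervalIndex
# objects that are closed on the same side and have compatible dtypes
```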
https://api.github.com/repos/pandas-dev/pandas/pulls/37916
2020-11-17T20:26:58Z
2020-11-18T00:54:19Z
2020-11-18T00:54:19Z
2020-11-18T02:54:00Z
CI: fix mypy error (quick fix)
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 88383cb7bbb6a..71c72e97b8ada 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -773,7 +773,10 @@ def intersection(self, other, sort=False): else: lslice = slice(*left.slice_locs(start, end)) left_chunk = left._values[lslice] - result = type(self)._simple_new(left_chunk) + # error: Argument 1 to "_simple_new" of "DatetimeIndexOpsMixin" has + # incompatible type "Union[ExtensionArray, Any]"; expected + # "Union[DatetimeArray, TimedeltaArray, PeriodArray]" [arg-type] + result = type(self)._simple_new(left_chunk) # type: ignore[arg-type] return self._wrap_setop_result(other, result)
see https://github.com/pandas-dev/pandas/pull/37913#issuecomment-729155748
https://api.github.com/repos/pandas-dev/pandas/pulls/37914
2020-11-17T19:44:46Z
2020-11-17T21:22:33Z
2020-11-17T21:22:33Z
2020-11-18T11:47:39Z
DOC: add two new examples to the transform docs
diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py index cc918c27b5c2e..03e39624253b2 100644 --- a/pandas/core/shared_docs.py +++ b/pandas/core/shared_docs.py @@ -324,4 +324,57 @@ 0 0.000000 1.000000 1 1.000000 2.718282 2 1.414214 7.389056 + +You can call transform on a GroupBy object: + +>>> df = pd.DataFrame({{ +... "Date": [ +... "2015-05-08", "2015-05-07", "2015-05-06", "2015-05-05", +... "2015-05-08", "2015-05-07", "2015-05-06", "2015-05-05"], +... "Data": [5, 8, 6, 1, 50, 100, 60, 120], +... }}) +>>> df + Date Data +0 2015-05-08 5 +1 2015-05-07 8 +2 2015-05-06 6 +3 2015-05-05 1 +4 2015-05-08 50 +5 2015-05-07 100 +6 2015-05-06 60 +7 2015-05-05 120 +>>> df.groupby('Date')['Data'].transform('sum') +0 55 +1 108 +2 66 +3 121 +4 55 +5 108 +6 66 +7 121 +Name: Data, dtype: int64 + +>>> df = pd.DataFrame({{ +... "c": [1, 1, 1, 2, 2, 2, 2], +... "type": ["m", "n", "o", "m", "m", "n", "n"] +... }}) +>>> df + c type +0 1 m +1 1 n +2 1 o +3 2 m +4 2 m +5 2 n +6 2 n +>>> df['size'] = df.groupby('c')['type'].transform(len) +>>> df + c type size +0 1 m 3 +1 1 n 3 +2 1 o 3 +3 2 m 4 +4 2 m 4 +5 2 n 4 +6 2 n 4 """
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry This PR adds two examples to the transform user guide. The examples apply the transform method to a GroupBy object. This comes from the PyData Global sprint ([this issue](https://github.com/MarcoGorelli/PyDataGlobal2020-sprint/issues/5)). The examples are taken from [here](https://stackoverflow.com/a/37189976/14583166) and [here](https://stackoverflow.com/a/54417351/14583166).
https://api.github.com/repos/pandas-dev/pandas/pulls/37912
2020-11-17T17:17:43Z
2020-11-19T18:40:13Z
2020-11-19T18:40:12Z
2020-11-19T18:40:17Z
CLN: Add *.zip to .gitignore
diff --git a/.gitignore b/.gitignore index 6c3c275c48fb7..1661862a5d066 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,7 @@ *.log *.swp *.pdb +*.zip .project .pydevproject .settings
- [X] xref #37903 - [X] tests added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/37911
2020-11-17T10:26:39Z
2020-11-17T13:01:27Z
2020-11-17T13:01:27Z
2020-11-17T13:01:31Z
BUG: use compression=None (again) to avoid inferring compression
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 62da3c0c5cddc..0a57435ea4300 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -630,6 +630,7 @@ I/O - Bug in :func:`read_html` was raising a ``TypeError`` when supplying a ``pathlib.Path`` argument to the ``io`` parameter (:issue:`37705`) - :meth:`to_excel` and :meth:`to_markdown` support writing to fsspec URLs such as S3 and Google Cloud Storage (:issue:`33987`) - Bug in :meth:`read_fw` was not skipping blank lines (even with ``skip_blank_lines=True``) (:issue:`37758`) +- :meth:`read_fwf` was inferring compression with ``compression=None`` which was not consistent with the other :meth:``read_*`` functions (:issue:`37909`) Period ^^^^^^ diff --git a/pandas/io/common.py b/pandas/io/common.py index 695c1671abd61..8ec0a869c7042 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -468,8 +468,11 @@ def infer_compression( ------ ValueError on invalid compression specified. """ + if compression is None: + return None + # Infer compression - if compression in ("infer", None): + if compression == "infer": # Convert all path types (e.g. pathlib.Path) to strings filepath_or_buffer = stringify_path(filepath_or_buffer) if not isinstance(filepath_or_buffer, str): diff --git a/pandas/tests/io/parser/test_compression.py b/pandas/tests/io/parser/test_compression.py index 6e957313d8de8..690d3133dae5e 100644 --- a/pandas/tests/io/parser/test_compression.py +++ b/pandas/tests/io/parser/test_compression.py @@ -4,11 +4,12 @@ """ import os +from pathlib import Path import zipfile import pytest -import pandas as pd +from pandas import DataFrame import pandas._testing as tm @@ -130,7 +131,7 @@ def test_compression_utf_encoding(all_parsers, csv_dir_path, utf_value, encoding path = os.path.join(csv_dir_path, f"utf{utf_value}_ex_small.zip") result = parser.read_csv(path, encoding=encoding, compression="zip", sep="\t") - expected = pd.DataFrame( + expected = DataFrame( { "Country": ["Venezuela", "Venezuela"], "Twitter": ["Hugo Chávez Frías", "Henrique Capriles R."], @@ -149,3 +150,15 @@ def test_invalid_compression(all_parsers, invalid_compression): with pytest.raises(ValueError, match=msg): parser.read_csv("test_file.zip", **compress_kwargs) + + +def test_ignore_compression_extension(all_parsers): + parser = all_parsers + df = DataFrame({"a": [0, 1]}) + with tm.ensure_clean("test.csv") as path_csv: + with tm.ensure_clean("test.csv.zip") as path_zip: + # make sure to create un-compressed file with zip extension + df.to_csv(path_csv, index=False) + Path(path_zip).write_text(Path(path_csv).read_text()) + + tm.assert_frame_equal(parser.read_csv(path_zip, compression=None), df) diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py index 5e9609956183b..d684bb36c3911 100644 --- a/pandas/tests/io/parser/test_read_fwf.py +++ b/pandas/tests/io/parser/test_read_fwf.py @@ -638,7 +638,7 @@ def test_default_delimiter(): tm.assert_frame_equal(result, expected) -@pytest.mark.parametrize("infer", [True, False, None]) +@pytest.mark.parametrize("infer", [True, False]) def test_fwf_compression(compression_only, infer): data = """1111111111 2222222222
`read_fwf` was using `compression=None` to infer compression (in addition to `compression="infer"`). This is undocumented but was enforced in a test. In #37639, I thought this was the expected behavior for all functions. That was wrong: `read_csv` explicitly states that `compression=None` does not infer any compression.
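A minimal sketch of the distinction, adapted from the new `test_ignore_compression_extension` test (the file name is illustrative):

```python
from pathlib import Path

import pandas as pd

df = pd.DataFrame({"a": [0, 1]})
# write an *uncompressed* CSV under a misleading .zip extension
Path("plain.csv.zip").write_text(df.to_csv(index=False))

# compression="infer" (the default) would look at the extension and try to
# unzip; compression=None now reads the bytes as-is, including in read_fwf
result = pd.read_csv("plain.csv.zip", compression=None)
```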
https://api.github.com/repos/pandas-dev/pandas/pulls/37909
2020-11-17T05:00:42Z
2020-11-18T13:24:56Z
2020-11-18T13:24:56Z
2020-11-19T04:12:57Z
CLN: defer CategoricalIndex.astype to Categorical.astype
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 67818e6cf8fae..3dec27d4af1ce 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -405,12 +405,12 @@ def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike: if is_categorical_dtype(dtype): dtype = cast(Union[str, CategoricalDtype], dtype) - # GH 10696/18593 + # GH 10696/18593/18630 dtype = self.dtype.update_dtype(dtype) - self = self.copy() if copy else self + result = self.copy() if copy else self if dtype == self.dtype: - return self - return self._set_dtype(dtype) + return result + return result._set_dtype(dtype) if is_extension_array_dtype(dtype): return array(self, dtype=dtype, copy=copy) if is_integer_dtype(dtype) and self.isna().any(): diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index d944bd5ffad40..2d87799320902 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -14,10 +14,8 @@ from pandas.core.dtypes.common import ( ensure_platform_int, is_categorical_dtype, - is_interval_dtype, is_list_like, is_scalar, - pandas_dtype, ) from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, notna @@ -371,20 +369,8 @@ def __contains__(self, key: Any) -> bool: @doc(Index.astype) def astype(self, dtype, copy=True): - if dtype is not None: - dtype = pandas_dtype(dtype) - - if is_interval_dtype(dtype): - from pandas import IntervalIndex - - return IntervalIndex(np.array(self)) - elif is_categorical_dtype(dtype): - # GH 18630 - dtype = self.dtype.update_dtype(dtype) - if dtype == self.dtype: - return self.copy() if copy else self - - return Index.astype(self, dtype=dtype, copy=copy) + res_data = self._data.astype(dtype, copy=copy) + return Index(res_data, name=self.name) @doc(Index.fillna) def fillna(self, value, downcast=None):
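The user-facing behavior should be unchanged; a small sketch of the two paths the removed code special-cased, both now routed through `Categorical.astype` and re-wrapped via `Index()`:

```python
import pandas as pd

ci = pd.CategoricalIndex(["a", "b", "a"])

ci.astype("category")  # still a CategoricalIndex
ci.astype(object)      # plain Index(['a', 'b', 'a'], dtype='object')
```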
https://api.github.com/repos/pandas-dev/pandas/pulls/37908
2020-11-17T04:32:45Z
2020-11-17T13:28:02Z
2020-11-17T13:28:02Z
2020-11-17T15:23:55Z
REF: simplify dt64 formatter functions
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index a1a1a83555f95..52f5de62d7c9b 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -387,7 +387,7 @@ def _formatter_func(self): from pandas.io.formats.format import get_format_datetime64 formatter = get_format_datetime64(is_dates_only=self._is_dates_only) - return lambda x: f"'{formatter(x, tz=self.tz)}'" + return lambda x: f"'{formatter(x)}'" # -------------------------------------------------------------------- # Set Operation Methods diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 2fae18bd76657..8ede35912a492 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -5,7 +5,6 @@ from contextlib import contextmanager from csv import QUOTE_NONE, QUOTE_NONNUMERIC -from datetime import tzinfo import decimal from functools import partial from io import StringIO @@ -36,7 +35,6 @@ from pandas._libs import lib from pandas._libs.missing import NA -from pandas._libs.tslib import format_array_from_datetime from pandas._libs.tslibs import NaT, Timedelta, Timestamp, iNaT from pandas._libs.tslibs.nattype import NaTType from pandas._typing import ( @@ -1529,11 +1527,9 @@ def _format_strings(self) -> List[str]: if self.formatter is not None and callable(self.formatter): return [self.formatter(x) for x in values] - fmt_values = format_array_from_datetime( - values.asi8.ravel(), - format=get_format_datetime64_from_values(values, self.date_format), - na_rep=self.nat_rep, - ).reshape(values.shape) + fmt_values = values._data._format_native_types( + na_rep=self.nat_rep, date_format=self.date_format + ) return fmt_values.tolist() @@ -1653,30 +1649,21 @@ def is_dates_only( return False -def _format_datetime64( - x: Union[NaTType, Timestamp], tz: Optional[tzinfo] = None, nat_rep: str = "NaT" -) -> str: - if x is None or (is_scalar(x) and isna(x)): +def _format_datetime64(x: Union[NaTType, Timestamp], nat_rep: str = "NaT") -> str: + if x is NaT: return nat_rep - if tz is not None or not isinstance(x, Timestamp): - if getattr(x, "tzinfo", None) is not None: - x = Timestamp(x).tz_convert(tz) - else: - x = Timestamp(x).tz_localize(tz) - return str(x) def _format_datetime64_dateonly( - x: Union[NaTType, Timestamp], nat_rep: str = "NaT", date_format: None = None + x: Union[NaTType, Timestamp], + nat_rep: str = "NaT", + date_format: Optional[str] = None, ) -> str: - if x is None or (is_scalar(x) and isna(x)): + if x is NaT: return nat_rep - if not isinstance(x, Timestamp): - x = Timestamp(x) - if date_format: return x.strftime(date_format) else: @@ -1684,15 +1671,15 @@ def _format_datetime64_dateonly( def get_format_datetime64( - is_dates_only: bool, nat_rep: str = "NaT", date_format: None = None + is_dates_only: bool, nat_rep: str = "NaT", date_format: Optional[str] = None ) -> Callable: if is_dates_only: - return lambda x, tz=None: _format_datetime64_dateonly( + return lambda x: _format_datetime64_dateonly( x, nat_rep=nat_rep, date_format=date_format ) else: - return lambda x, tz=None: _format_datetime64(x, tz=tz, nat_rep=nat_rep) + return lambda x: _format_datetime64(x, nat_rep=nat_rep) def get_format_datetime64_from_values(
https://api.github.com/repos/pandas-dev/pandas/pulls/37907
2020-11-17T03:03:30Z
2020-11-17T12:57:29Z
2020-11-17T12:57:29Z
2020-11-17T15:21:59Z
CI: update pyupgrade to 2.7.4 in pre-commit
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f9b396715664a..717334bfe1299 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,7 +26,7 @@ repos: name: isort (cython) types: [cython] - repo: https://github.com/asottile/pyupgrade - rev: v2.7.3 + rev: v2.7.4 hooks: - id: pyupgrade args: [--py37-plus]
Updating pyupgrade to 2.7.4 in the pre-commit yaml. `pre-commit run pyupgrade --all-files` runs cleanly :+1: - [x] closes #37891 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry - [x] passes `pre-commit run pyupgrade --all-files`
https://api.github.com/repos/pandas-dev/pandas/pulls/37906
2020-11-17T02:52:50Z
2020-11-17T09:23:09Z
2020-11-17T09:23:09Z
2022-01-17T20:07:24Z
BUG: groupby resample different results with .agg() vs .mean()
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index e2521cedb64cc..5d831f3a4f369 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -791,8 +791,9 @@ Groupby/resample/rolling - Bug in :meth:`.DataFrameGroupBy.quantile` and :meth:`.Resampler.quantile` raised ``TypeError`` when values were of type ``Timedelta`` (:issue:`29485`) - Bug in :meth:`.Rolling.median` and :meth:`.Rolling.quantile` returned wrong values for :class:`.BaseIndexer` subclasses with non-monotonic starting or ending points for windows (:issue:`37153`) - Bug in :meth:`DataFrame.groupby` dropped ``nan`` groups from result with ``dropna=False`` when grouping over a single column (:issue:`35646`, :issue:`35542`) -- Bug in :meth:`.DataFrameGroupBy.head`, :meth:`.DataFrameGroupBy.tail`, :meth:`SeriesGroupBy.head`, and :meth:`SeriesGroupBy.tail` would raise when used with ``axis=1`` (:issue:`9772`) +- Bug in :meth:`.DataFrameGroupBy.head`, :meth:`DataFrameGroupBy.tail`, :meth:`SeriesGroupBy.head`, and :meth:`SeriesGroupBy.tail` would raise when used with ``axis=1`` (:issue:`9772`) - Bug in :meth:`.DataFrameGroupBy.transform` would raise when used with ``axis=1`` and a transformation kernel (e.g. "shift") (:issue:`36308`) +- Bug in :meth:`.DataFrameGroupBy.resample` using ``.agg`` with sum produced different result than just calling ``.sum`` (:issue:`33548`) - Bug in :meth:`.DataFrameGroupBy.apply` dropped values on ``nan`` group when returning the same axes with the original frame (:issue:`38227`) - Bug in :meth:`.DataFrameGroupBy.quantile` couldn't handle with arraylike ``q`` when grouping by columns (:issue:`33795`) - Bug in :meth:`DataFrameGroupBy.rank` with ``datetime64tz`` or period dtype incorrectly casting results to those dtypes instead of returning ``float64`` dtype (:issue:`38187`) diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 8267cdeb77517..d6ff2f89d758b 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -287,6 +287,7 @@ def __init__( self.indexer = None self.binner = None self._grouper = None + self._indexer = None self.dropna = dropna @final @@ -341,15 +342,24 @@ def _set_grouper(self, obj: FrameOrSeries, sort: bool = False): # Keep self.grouper value before overriding if self._grouper is None: self._grouper = self.grouper + self._indexer = self.indexer # the key must be a valid info item if self.key is not None: key = self.key # The 'on' is already defined if getattr(self.grouper, "name", None) == key and isinstance(obj, Series): - # pandas\core\groupby\grouper.py:348: error: Item "None" of - # "Optional[Any]" has no attribute "take" [union-attr] - ax = self._grouper.take(obj.index) # type: ignore[union-attr] + # Sometimes self._grouper will have been resorted while + # obj has not. In this case there is a mismatch when we + # call self._grouper.take(obj.index) so we need to undo the sorting + # before we call _grouper.take. 
+ assert self._grouper is not None + if self._indexer is not None: + reverse_indexer = self._indexer.argsort() + unsorted_ax = self._grouper.take(reverse_indexer) + ax = unsorted_ax.take(obj.index) + else: + ax = self._grouper.take(obj.index) else: if key not in obj._info_axis: raise KeyError(f"The grouper name {key} is not found") diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py index 15dd49f8bf182..da5bb0eb59f70 100644 --- a/pandas/tests/resample/test_resampler_grouper.py +++ b/pandas/tests/resample/test_resampler_grouper.py @@ -362,3 +362,39 @@ def test_apply_to_one_column_of_df(): tm.assert_series_equal(result, expected) result = df.resample("H").apply(lambda group: group["col"].sum()) tm.assert_series_equal(result, expected) + + +def test_resample_groupby_agg(): + # GH: 33548 + df = DataFrame( + { + "cat": [ + "cat_1", + "cat_1", + "cat_2", + "cat_1", + "cat_2", + "cat_1", + "cat_2", + "cat_1", + ], + "num": [5, 20, 22, 3, 4, 30, 10, 50], + "date": [ + "2019-2-1", + "2018-02-03", + "2020-3-11", + "2019-2-2", + "2019-2-2", + "2018-12-4", + "2020-3-11", + "2020-12-12", + ], + } + ) + df["date"] = pd.to_datetime(df["date"]) + + resampled = df.groupby("cat").resample("Y", on="date") + expected = resampled.sum() + result = resampled.agg({"num": "sum"}) + + tm.assert_frame_equal(result, expected)
- [x] closes #33548 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
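A condensed version of the regression test, for reference; before the fix the two results could disagree because the grouper had been re-sorted while the original frame had not:

```python
import pandas as pd

df = pd.DataFrame(
    {
        "cat": ["cat_1", "cat_2", "cat_1", "cat_2"],
        "num": [5, 22, 3, 10],
        "date": pd.to_datetime(
            ["2019-02-01", "2020-03-11", "2019-02-02", "2020-03-11"]
        ),
    }
)

resampled = df.groupby("cat").resample("Y", on="date")
# .agg with a sum now matches calling .sum() directly
pd.testing.assert_frame_equal(resampled.agg({"num": "sum"}), resampled.sum())
```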
https://api.github.com/repos/pandas-dev/pandas/pulls/37905
2020-11-17T01:09:39Z
2020-12-22T14:07:19Z
2020-12-22T14:07:19Z
2020-12-22T14:07:38Z
DOC: Timestamp EX01 errors
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index d86d3261d404e..8069d8b5cacc8 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -273,6 +273,17 @@ cdef class _NaT(datetime): See Also -------- DatetimeIndex.to_numpy : Similar method for DatetimeIndex. + + Examples + -------- + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') + >>> ts.to_numpy() + numpy.datetime64('2020-03-14T15:32:52.192548651') + + Analogous for ``pd.NaT``: + + >>> pd.NaT.to_numpy() + numpy.datetime64('NaT') """ return self.to_datetime64() @@ -410,6 +421,17 @@ class NaTType(_NaT): Returns ------- str + + Examples + -------- + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') + >>> ts.month_name() + 'March' + + Analogous for ``pd.NaT``: + + >>> pd.NaT.month_name() + nan """, ) day_name = _make_nan_func( @@ -425,6 +447,17 @@ class NaTType(_NaT): Returns ------- str + + Examples + -------- + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') + >>> ts.day_name() + 'Saturday' + + Analogous for ``pd.NaT``: + + >>> pd.NaT.day_name() + nan """, ) # _nat_methods @@ -463,6 +496,12 @@ class NaTType(_NaT): Format string to convert Timestamp to string. See strftime documentation for more information on the format string: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior. + + Examples + -------- + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') + >>> ts.strftime('%Y-%m-%d %X') + '2020-03-14 15:32:52' """, ) @@ -481,6 +520,11 @@ class NaTType(_NaT): Timestamp.utcfromtimestamp(ts) Construct a naive UTC datetime from a POSIX timestamp. + + Examples + -------- + >>> pd.Timestamp.fromtimestamp(1584199972) + Timestamp('2020-03-14 15:32:52') """, ) fromtimestamp = _make_error_func( @@ -489,6 +533,13 @@ class NaTType(_NaT): Timestamp.fromtimestamp(ts) Transform timestamp[, tz] to tz's local time from POSIX timestamp. + + Examples + -------- + >>> pd.Timestamp.utcfromtimestamp(1584199972) + Timestamp('2020-03-14 15:32:52') + + Note that the output may change depending on your local time. """, ) combine = _make_error_func( @@ -497,6 +548,12 @@ class NaTType(_NaT): Timestamp.combine(date, time) Combine date, time into datetime with same date and time fields. + + Examples + -------- + >>> from datetime import date, time + >>> pd.Timestamp.combine(date(2020, 3, 14), time(15, 30, 15)) + Timestamp('2020-03-14 15:30:15') """, ) utcnow = _make_error_func( @@ -505,10 +562,26 @@ class NaTType(_NaT): Timestamp.utcnow() Return a new Timestamp representing UTC day and time. + + Examples + -------- + >>> pd.Timestamp.utcnow() + Timestamp('2020-11-16 22:50:18.092888+0000', tz='UTC') """, ) - timestamp = _make_error_func("timestamp", "Return POSIX timestamp as float.") + timestamp = _make_error_func( + "timestamp", + """ + Return POSIX timestamp as float. + + Examples + -------- + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548') + >>> ts.timestamp() + 1584199972.192548 + """ + ) # GH9513 NaT methods (except to_datetime64) to raise, return np.nan, or # return NaT create functions that raise, for binding to NaTType @@ -531,6 +604,29 @@ class NaTType(_NaT): ------ TypeError If Timestamp is tz-naive. 
+ + Examples + -------- + Create a timestamp object with UTC timezone: + + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651', tz='UTC') + >>> ts + Timestamp('2020-03-14 15:32:52.192548651+0000', tz='UTC') + + Change to Tokyo timezone: + + >>> ts.tz_convert(tz='Asia/Tokyo') + Timestamp('2020-03-15 00:32:52.192548651+0900', tz='Asia/Tokyo') + + Can also use ``astimezone``: + + >>> ts.astimezone(tz='Asia/Tokyo') + Timestamp('2020-03-15 00:32:52.192548651+0900', tz='Asia/Tokyo') + + Analogous for ``pd.NaT``: + + >>> pd.NaT.tz_convert(tz='Asia/Tokyo') + NaT """, ) fromordinal = _make_error_func( @@ -549,6 +645,11 @@ class NaTType(_NaT): Offset to apply to the Timestamp. tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone for the Timestamp. + + Examples + -------- + >>> pd.Timestamp.fromordinal(737425) + Timestamp('2020-01-01 00:00:00') """, ) @@ -559,6 +660,17 @@ class NaTType(_NaT): Convert a Timestamp object to a native Python datetime object. If warn=True, issue a warning if nanoseconds is nonzero. + + Examples + -------- + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548') + >>> ts.to_pydatetime() + datetime.datetime(2020, 3, 14, 15, 32, 52, 192548) + + Analogous for ``pd.NaT``: + + >>> pd.NaT.to_pydatetime() + NaT """, ) @@ -574,6 +686,16 @@ class NaTType(_NaT): ---------- tz : str or timezone object, default None Timezone to localize to. + + Examples + -------- + >>> pd.Timestamp.now() + Timestamp('2020-11-16 22:06:16.378782') + + Analogous for ``pd.NaT``: + + >>> pd.NaT.now() + NaT """, ) today = _make_nat_func( @@ -589,6 +711,16 @@ class NaTType(_NaT): ---------- tz : str or timezone object, default None Timezone to localize to. + + Examples + -------- + >>> pd.Timestamp.today() + Timestamp('2020-11-16 22:37:39.969883') + + Analogous for ``pd.NaT``: + + >>> pd.NaT.today() + NaT """, ) round = _make_nat_func( @@ -632,6 +764,41 @@ timedelta}, default 'raise' Raises ------ ValueError if the freq cannot be converted + + Examples + -------- + Create a timestamp object: + + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') + + A timestamp can be rounded using multiple frequency units: + + >>> ts.round(freq='H') # hour + Timestamp('2020-03-14 16:00:00') + + >>> ts.round(freq='T') # minute + Timestamp('2020-03-14 15:33:00') + + >>> ts.round(freq='S') # seconds + Timestamp('2020-03-14 15:32:52') + + >>> ts.round(freq='L') # milliseconds + Timestamp('2020-03-14 15:32:52.193000') + + ``freq`` can also be a multiple of a single unit, like '5T' (i.e. 5 minutes): + + >>> ts.round(freq='5T') + Timestamp('2020-03-14 15:35:00') + + or a combination of multiple units, like '1H30T' (i.e. 1 hour and 30 minutes): + + >>> ts.round(freq='1H30T') + Timestamp('2020-03-14 15:00:00') + + Analogous for ``pd.NaT``: + + >>> pd.NaT.round() + NaT """, ) floor = _make_nat_func( @@ -671,6 +838,41 @@ timedelta}, default 'raise' Raises ------ ValueError if the freq cannot be converted. + + Examples + -------- + Create a timestamp object: + + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') + + A timestamp can be floored using multiple frequency units: + + >>> ts.floor(freq='H') # hour + Timestamp('2020-03-14 15:00:00') + + >>> ts.floor(freq='T') # minute + Timestamp('2020-03-14 15:32:00') + + >>> ts.floor(freq='S') # seconds + Timestamp('2020-03-14 15:32:52') + + >>> ts.floor(freq='N') # nanoseconds + Timestamp('2020-03-14 15:32:52.192548651') + + ``freq`` can also be a multiple of a single unit, like '5T' (i.e. 
5 minutes): + + >>> ts.floor(freq='5T') + Timestamp('2020-03-14 15:30:00') + + or a combination of multiple units, like '1H30T' (i.e. 1 hour and 30 minutes): + + >>> ts.floor(freq='1H30T') + Timestamp('2020-03-14 15:00:00') + + Analogous for ``pd.NaT``: + + >>> pd.NaT.floor() + NaT """, ) ceil = _make_nat_func( @@ -710,6 +912,41 @@ timedelta}, default 'raise' Raises ------ ValueError if the freq cannot be converted. + + Examples + -------- + Create a timestamp object: + + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') + + A timestamp can be ceiled using multiple frequency units: + + >>> ts.ceil(freq='H') # hour + Timestamp('2020-03-14 16:00:00') + + >>> ts.ceil(freq='T') # minute + Timestamp('2020-03-14 15:33:00') + + >>> ts.ceil(freq='S') # seconds + Timestamp('2020-03-14 15:32:53') + + >>> ts.ceil(freq='U') # microseconds + Timestamp('2020-03-14 15:32:52.192549') + + ``freq`` can also be a multiple of a single unit, like '5T' (i.e. 5 minutes): + + >>> ts.ceil(freq='5T') + Timestamp('2020-03-14 15:35:00') + + or a combination of multiple units, like '1H30T' (i.e. 1 hour and 30 minutes): + + >>> ts.ceil(freq='1H30T') + Timestamp('2020-03-14 16:30:00') + + Analogous for ``pd.NaT``: + + >>> pd.NaT.ceil() + NaT """, ) @@ -732,6 +969,29 @@ timedelta}, default 'raise' ------ TypeError If Timestamp is tz-naive. + + Examples + -------- + Create a timestamp object with UTC timezone: + + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651', tz='UTC') + >>> ts + Timestamp('2020-03-14 15:32:52.192548651+0000', tz='UTC') + + Change to Tokyo timezone: + + >>> ts.tz_convert(tz='Asia/Tokyo') + Timestamp('2020-03-15 00:32:52.192548651+0900', tz='Asia/Tokyo') + + Can also use ``astimezone``: + + >>> ts.astimezone(tz='Asia/Tokyo') + Timestamp('2020-03-15 00:32:52.192548651+0900', tz='Asia/Tokyo') + + Analogous for ``pd.NaT``: + + >>> pd.NaT.tz_convert(tz='Asia/Tokyo') + NaT """, ) tz_localize = _make_nat_func( @@ -787,6 +1047,24 @@ default 'raise' ------ TypeError If the Timestamp is tz-aware and tz is not None. + + Examples + -------- + Create a naive timestamp object: + + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') + >>> ts + Timestamp('2020-03-14 15:32:52.192548651') + + Add 'Europe/Stockholm' as timezone: + + >>> ts.tz_localize(tz='Europe/Stockholm') + Timestamp('2020-03-14 15:32:52.192548651+0100', tz='Europe/Stockholm') + + Analogous for ``pd.NaT``: + + >>> pd.NaT.tz_localize() + NaT """, ) replace = _make_nat_func( @@ -810,6 +1088,30 @@ default 'raise' Returns ------- Timestamp with fields replaced + + Examples + -------- + Create a timestamp object: + + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651', tz='UTC') + >>> ts + Timestamp('2020-03-14 15:32:52.192548651+0000', tz='UTC') + + Replace year and the hour: + + >>> ts.replace(year=1999, hour=10) + Timestamp('1999-03-14 10:32:52.192548651+0000', tz='UTC') + + Replace timezone (not a conversion): + + >>> import pytz + >>> ts.replace(tzinfo=pytz.timezone('US/Pacific')) + Timestamp('2020-03-14 15:32:52.192548651-0700', tz='US/Pacific') + + Analogous for ``pd.NaT``: + + >>> pd.NaT.replace(tzinfo=pytz.timezone('US/Pacific')) + NaT """, ) diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 60ffa3dd46989..a4f764878d19e 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -386,6 +386,16 @@ cdef class _Timestamp(ABCTimestamp): def is_month_start(self) -> bool: """ Return True if date is first day of month. 
+ + Examples + -------- + >>> ts = pd.Timestamp(2020, 3, 14) + >>> ts.is_month_start + False + + >>> ts = pd.Timestamp(2020, 1, 1) + >>> ts.is_month_start + True """ if self.freq is None: # fast-path for non-business frequencies @@ -396,6 +406,16 @@ cdef class _Timestamp(ABCTimestamp): def is_month_end(self) -> bool: """ Return True if date is last day of month. + + Examples + -------- + >>> ts = pd.Timestamp(2020, 3, 14) + >>> ts.is_month_end + False + + >>> ts = pd.Timestamp(2020, 12, 31) + >>> ts.is_month_end + True """ if self.freq is None: # fast-path for non-business frequencies @@ -406,6 +426,16 @@ cdef class _Timestamp(ABCTimestamp): def is_quarter_start(self) -> bool: """ Return True if date is first day of the quarter. + + Examples + -------- + >>> ts = pd.Timestamp(2020, 3, 14) + >>> ts.is_quarter_start + False + + >>> ts = pd.Timestamp(2020, 4, 1) + >>> ts.is_quarter_start + True """ if self.freq is None: # fast-path for non-business frequencies @@ -416,6 +446,16 @@ cdef class _Timestamp(ABCTimestamp): def is_quarter_end(self) -> bool: """ Return True if date is last day of the quarter. + + Examples + -------- + >>> ts = pd.Timestamp(2020, 3, 14) + >>> ts.is_quarter_end + False + + >>> ts = pd.Timestamp(2020, 3, 31) + >>> ts.is_quarter_end + True """ if self.freq is None: # fast-path for non-business frequencies @@ -426,6 +466,16 @@ cdef class _Timestamp(ABCTimestamp): def is_year_start(self) -> bool: """ Return True if date is first day of the year. + + Examples + -------- + >>> ts = pd.Timestamp(2020, 3, 14) + >>> ts.is_year_start + False + + >>> ts = pd.Timestamp(2020, 1, 1) + >>> ts.is_year_start + True """ if self.freq is None: # fast-path for non-business frequencies @@ -436,6 +486,16 @@ cdef class _Timestamp(ABCTimestamp): def is_year_end(self) -> bool: """ Return True if date is last day of the year. + + Examples + -------- + >>> ts = pd.Timestamp(2020, 3, 14) + >>> ts.is_year_end + False + + >>> ts = pd.Timestamp(2020, 12, 31) + >>> ts.is_year_end + True """ if self.freq is None: # fast-path for non-business frequencies @@ -464,6 +524,17 @@ cdef class _Timestamp(ABCTimestamp): Returns ------- str + + Examples + -------- + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') + >>> ts.day_name() + 'Saturday' + + Analogous for ``pd.NaT``: + + >>> pd.NaT.day_name() + nan """ return self._get_date_name_field("day_name", locale) @@ -479,6 +550,17 @@ cdef class _Timestamp(ABCTimestamp): Returns ------- str + + Examples + -------- + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') + >>> ts.month_name() + 'March' + + Analogous for ``pd.NaT``: + + >>> pd.NaT.month_name() + nan """ return self._get_date_name_field("month_name", locale) @@ -486,6 +568,12 @@ cdef class _Timestamp(ABCTimestamp): def is_leap_year(self) -> bool: """ Return True if year is a leap year. + + Examples + -------- + >>> ts = pd.Timestamp(2020, 3, 14) + >>> ts.is_leap_year + True """ return bool(ccalendar.is_leapyear(self.year)) @@ -493,6 +581,12 @@ cdef class _Timestamp(ABCTimestamp): def day_of_week(self) -> int: """ Return day of the week. + + Examples + -------- + >>> ts = pd.Timestamp(2020, 3, 14) + >>> ts.day_of_week + 5 """ return self.weekday() @@ -500,6 +594,12 @@ cdef class _Timestamp(ABCTimestamp): def day_of_year(self) -> int: """ Return the day of the year. 
+ + Examples + -------- + >>> ts = pd.Timestamp(2020, 3, 14) + >>> ts.day_of_year + 74 """ return ccalendar.get_day_of_year(self.year, self.month, self.day) @@ -507,6 +607,12 @@ cdef class _Timestamp(ABCTimestamp): def quarter(self) -> int: """ Return the quarter of the year. + + Examples + -------- + >>> ts = pd.Timestamp(2020, 3, 14) + >>> ts.quarter + 1 """ return ((self.month - 1) // 3) + 1 @@ -514,6 +620,12 @@ cdef class _Timestamp(ABCTimestamp): def week(self) -> int: """ Return the week number of the year. + + Examples + -------- + >>> ts = pd.Timestamp(2020, 3, 14) + >>> ts.week + 11 """ return ccalendar.get_week_of_year(self.year, self.month, self.day) @@ -521,6 +633,12 @@ cdef class _Timestamp(ABCTimestamp): def days_in_month(self) -> int: """ Return the number of days in the month. + + Examples + -------- + >>> ts = pd.Timestamp(2020, 3, 14) + >>> ts.days_in_month + 31 """ return ccalendar.get_days_in_month(self.year, self.month) @@ -530,6 +648,12 @@ cdef class _Timestamp(ABCTimestamp): def normalize(self) -> "Timestamp": """ Normalize Timestamp to midnight, preserving tz information. + + Examples + -------- + >>> ts = pd.Timestamp(2020, 3, 14, 15, 30) + >>> ts.normalize() + Timestamp('2020-03-14 00:00:00') """ cdef: local_val = self._maybe_convert_value_to_local() @@ -639,11 +763,25 @@ cdef class _Timestamp(ABCTimestamp): def asm8(self) -> np.datetime64: """ Return numpy datetime64 format in nanoseconds. + + Examples + -------- + >>> ts = pd.Timestamp(2020, 3, 14, 15) + >>> ts.asm8 + numpy.datetime64('2020-03-14T15:00:00.000000000') """ return np.datetime64(self.value, 'ns') def timestamp(self): - """Return POSIX timestamp as float.""" + """ + Return POSIX timestamp as float. + + Examples + -------- + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548') + >>> ts.timestamp() + 1584199972.192548 + """ # GH 17329 # Note: Naive timestamps will not match datetime.stdlib return round(self.value / 1e9, 6) @@ -653,6 +791,17 @@ cdef class _Timestamp(ABCTimestamp): Convert a Timestamp object to a native Python datetime object. If warn=True, issue a warning if nanoseconds is nonzero. + + Examples + -------- + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548') + >>> ts.to_pydatetime() + datetime.datetime(2020, 3, 14, 15, 32, 52, 192548) + + Analogous for ``pd.NaT``: + + >>> pd.NaT.to_pydatetime() + NaT """ if self.nanosecond != 0 and warn: warnings.warn("Discarding nonzero nanoseconds in conversion", @@ -685,12 +834,38 @@ cdef class _Timestamp(ABCTimestamp): See Also -------- DatetimeIndex.to_numpy : Similar method for DatetimeIndex. + + Examples + -------- + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') + >>> ts.to_numpy() + numpy.datetime64('2020-03-14T15:32:52.192548651') + + Analogous for ``pd.NaT``: + + >>> pd.NaT.to_numpy() + numpy.datetime64('NaT') """ return self.to_datetime64() def to_period(self, freq=None): """ Return an period of which this timestamp is an observation. + + Examples + -------- + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') + >>> ts.to_period(freq='Y') # Year end frequency + Period('2020', 'A-DEC') + + >>> ts.to_period(freq='M') # Month end frequency + Period('2020-03', 'M') + + >>> ts.to_period(freq='W') # Weekly frequency + Period('2020-03-09/2020-03-15', 'W-SUN') + + >>> ts.to_period(freq='Q') # Quarter end frequency + Period('2020Q1', 'Q-DEC') """ from pandas import Period @@ -800,6 +975,11 @@ class Timestamp(_Timestamp): Offset to apply to the Timestamp. 
tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone for the Timestamp. + + Examples + -------- + >>> pd.Timestamp.fromordinal(737425) + Timestamp('2020-01-01 00:00:00') """ return cls(datetime.fromordinal(ordinal), freq=freq, tz=tz) @@ -816,6 +996,16 @@ class Timestamp(_Timestamp): ---------- tz : str or timezone object, default None Timezone to localize to. + + Examples + -------- + >>> pd.Timestamp.now() + Timestamp('2020-11-16 22:06:16.378782') + + Analogous for ``pd.NaT``: + + >>> pd.NaT.now() + NaT """ if isinstance(tz, str): tz = maybe_get_tz(tz) @@ -834,6 +1024,16 @@ class Timestamp(_Timestamp): ---------- tz : str or timezone object, default None Timezone to localize to. + + Examples + -------- + >>> pd.Timestamp.today() + Timestamp('2020-11-16 22:37:39.969883') + + Analogous for ``pd.NaT``: + + >>> pd.NaT.today() + NaT """ return cls.now(tz) @@ -843,6 +1043,11 @@ class Timestamp(_Timestamp): Timestamp.utcnow() Return a new Timestamp representing UTC day and time. + + Examples + -------- + >>> pd.Timestamp.utcnow() + Timestamp('2020-11-16 22:50:18.092888+0000', tz='UTC') """ return cls.now(UTC) @@ -852,6 +1057,11 @@ class Timestamp(_Timestamp): Timestamp.utcfromtimestamp(ts) Construct a naive UTC datetime from a POSIX timestamp. + + Examples + -------- + >>> pd.Timestamp.fromtimestamp(1584199972) + Timestamp('2020-03-14 15:32:52') """ return cls(datetime.utcfromtimestamp(ts)) @@ -861,6 +1071,13 @@ class Timestamp(_Timestamp): Timestamp.fromtimestamp(ts) Transform timestamp[, tz] to tz's local time from POSIX timestamp. + + Examples + -------- + >>> pd.Timestamp.utcfromtimestamp(1584199972) + Timestamp('2020-03-14 15:32:52') + + Note that the output may change depending on your local time. """ return cls(datetime.fromtimestamp(ts)) @@ -877,6 +1094,12 @@ class Timestamp(_Timestamp): Format string to convert Timestamp to string. See strftime documentation for more information on the format string: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior. + + Examples + -------- + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') + >>> ts.strftime('%Y-%m-%d %X') + '2020-03-14 15:32:52' """ return datetime.strftime(self, format) @@ -899,6 +1122,12 @@ class Timestamp(_Timestamp): Timestamp.combine(date, time) Combine date, time into datetime with same date and time fields. + + Examples + -------- + >>> from datetime import date, time + >>> pd.Timestamp.combine(date(2020, 3, 14), time(15, 30, 15)) + Timestamp('2020-03-14 15:30:15') """ return cls(datetime.combine(date, time)) @@ -1113,6 +1342,41 @@ timedelta}, default 'raise' Raises ------ ValueError if the freq cannot be converted + + Examples + -------- + Create a timestamp object: + + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') + + A timestamp can be rounded using multiple frequency units: + + >>> ts.round(freq='H') # hour + Timestamp('2020-03-14 16:00:00') + + >>> ts.round(freq='T') # minute + Timestamp('2020-03-14 15:33:00') + + >>> ts.round(freq='S') # seconds + Timestamp('2020-03-14 15:32:52') + + >>> ts.round(freq='L') # milliseconds + Timestamp('2020-03-14 15:32:52.193000') + + ``freq`` can also be a multiple of a single unit, like '5T' (i.e. 5 minutes): + + >>> ts.round(freq='5T') + Timestamp('2020-03-14 15:35:00') + + or a combination of multiple units, like '1H30T' (i.e. 
1 hour and 30 minutes): + + >>> ts.round(freq='1H30T') + Timestamp('2020-03-14 15:00:00') + + Analogous for ``pd.NaT``: + + >>> pd.NaT.round() + NaT """ return self._round( freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent @@ -1154,6 +1418,41 @@ timedelta}, default 'raise' Raises ------ ValueError if the freq cannot be converted. + + Examples + -------- + Create a timestamp object: + + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') + + A timestamp can be floored using multiple frequency units: + + >>> ts.floor(freq='H') # hour + Timestamp('2020-03-14 15:00:00') + + >>> ts.floor(freq='T') # minute + Timestamp('2020-03-14 15:32:00') + + >>> ts.floor(freq='S') # seconds + Timestamp('2020-03-14 15:32:52') + + >>> ts.floor(freq='N') # nanoseconds + Timestamp('2020-03-14 15:32:52.192548651') + + ``freq`` can also be a multiple of a single unit, like '5T' (i.e. 5 minutes): + + >>> ts.floor(freq='5T') + Timestamp('2020-03-14 15:30:00') + + or a combination of multiple units, like '1H30T' (i.e. 1 hour and 30 minutes): + + >>> ts.floor(freq='1H30T') + Timestamp('2020-03-14 15:00:00') + + Analogous for ``pd.NaT``: + + >>> pd.NaT.floor() + NaT """ return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent) @@ -1193,6 +1492,41 @@ timedelta}, default 'raise' Raises ------ ValueError if the freq cannot be converted. + + Examples + -------- + Create a timestamp object: + + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') + + A timestamp can be ceiled using multiple frequency units: + + >>> ts.ceil(freq='H') # hour + Timestamp('2020-03-14 16:00:00') + + >>> ts.ceil(freq='T') # minute + Timestamp('2020-03-14 15:33:00') + + >>> ts.ceil(freq='S') # seconds + Timestamp('2020-03-14 15:32:53') + + >>> ts.ceil(freq='U') # microseconds + Timestamp('2020-03-14 15:32:52.192549') + + ``freq`` can also be a multiple of a single unit, like '5T' (i.e. 5 minutes): + + >>> ts.ceil(freq='5T') + Timestamp('2020-03-14 15:35:00') + + or a combination of multiple units, like '1H30T' (i.e. 1 hour and 30 minutes): + + >>> ts.ceil(freq='1H30T') + Timestamp('2020-03-14 16:30:00') + + Analogous for ``pd.NaT``: + + >>> pd.NaT.ceil() + NaT """ return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent) @@ -1200,6 +1534,12 @@ timedelta}, default 'raise' def tz(self): """ Alias for tzinfo. + + Examples + -------- + >>> ts = pd.Timestamp(1584226800, unit='s', tz='Europe/Stockholm') + >>> ts.tz + <DstTzInfo 'Europe/Stockholm' CET+1:00:00 STD> """ return self.tzinfo @@ -1270,6 +1610,24 @@ default 'raise' ------ TypeError If the Timestamp is tz-aware and tz is not None. + + Examples + -------- + Create a naive timestamp object: + + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') + >>> ts + Timestamp('2020-03-14 15:32:52.192548651') + + Add 'Europe/Stockholm' as timezone: + + >>> ts.tz_localize(tz='Europe/Stockholm') + Timestamp('2020-03-14 15:32:52.192548651+0100', tz='Europe/Stockholm') + + Analogous for ``pd.NaT``: + + >>> pd.NaT.tz_localize() + NaT """ if ambiguous == 'infer': raise ValueError('Cannot infer offset with only one time.') @@ -1318,6 +1676,29 @@ default 'raise' ------ TypeError If Timestamp is tz-naive. 
+ + Examples + -------- + Create a timestamp object with UTC timezone: + + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651', tz='UTC') + >>> ts + Timestamp('2020-03-14 15:32:52.192548651+0000', tz='UTC') + + Change to Tokyo timezone: + + >>> ts.tz_convert(tz='Asia/Tokyo') + Timestamp('2020-03-15 00:32:52.192548651+0900', tz='Asia/Tokyo') + + Can also use ``astimezone``: + + >>> ts.astimezone(tz='Asia/Tokyo') + Timestamp('2020-03-15 00:32:52.192548651+0900', tz='Asia/Tokyo') + + Analogous for ``pd.NaT``: + + >>> pd.NaT.tz_convert(tz='Asia/Tokyo') + NaT """ if self.tzinfo is None: # tz naive, use tz_localize @@ -1362,6 +1743,30 @@ default 'raise' Returns ------- Timestamp with fields replaced + + Examples + -------- + Create a timestamp object: + + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651', tz='UTC') + >>> ts + Timestamp('2020-03-14 15:32:52.192548651+0000', tz='UTC') + + Replace year and the hour: + + >>> ts.replace(year=1999, hour=10) + Timestamp('1999-03-14 10:32:52.192548651+0000', tz='UTC') + + Replace timezone (not a conversion): + + >>> import pytz + >>> ts.replace(tzinfo=pytz.timezone('US/Pacific')) + Timestamp('2020-03-14 15:32:52.192548651-0700', tz='US/Pacific') + + Analogous for ``pd.NaT``: + + >>> pd.NaT.replace(tzinfo=pytz.timezone('US/Pacific')) + NaT """ cdef: @@ -1441,6 +1846,12 @@ default 'raise' """ Convert TimeStamp to a Julian Date. 0 Julian date is noon January 1, 4713 BC. + + Examples + -------- + >>> ts = pd.Timestamp('2020-03-14T15:32:52') + >>> ts.to_julian_date() + 2458923.147824074 """ year = self.year month = self.month
Example section added for several methods in class [pd.Timestamp](https://github.com/pandas-dev/pandas/blob/1f89b82cc65f512e78b0038975425cc3d0772a05/pandas/_libs/tslibs/timestamps.pyx#L768) - [X] xref #37875 - [ ] tests added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry --- Examples added for: - pandas.Timestamp.asm8 - pandas.Timestamp.day_of_week - pandas.Timestamp.day_of_year - pandas.Timestamp.days_in_month - pandas.Timestamp.is_leap_year - pandas.Timestamp.is_month_end - pandas.Timestamp.is_month_start - pandas.Timestamp.is_quarter_end - pandas.Timestamp.is_quarter_start - pandas.Timestamp.is_year_end - pandas.Timestamp.is_year_start - pandas.Timestamp.quarter - pandas.Timestamp.tz - pandas.Timestamp.week - pandas.Timestamp.ceil - pandas.Timestamp.combine - pandas.Timestamp.day_name - pandas.Timestamp.floor - pandas.Timestamp.fromordinal - pandas.Timestamp.fromtimestamp - pandas.Timestamp.month_name - pandas.Timestamp.normalize - pandas.Timestamp.now - pandas.Timestamp.replace - pandas.Timestamp.round - pandas.Timestamp.strftime - pandas.Timestamp.timestamp - pandas.Timestamp.to_numpy - pandas.Timestamp.to_julian_date - pandas.Timestamp.to_period - pandas.Timestamp.to_pydatetime - pandas.Timestamp.today - pandas.Timestamp.tz_convert - pandas.Timestamp.tz_localize - pandas.Timestamp.utcfromtimestamp - pandas.Timestamp.utcnow - pandas.Timestamp.weekday
https://api.github.com/repos/pandas-dev/pandas/pulls/37904
2020-11-16T23:58:34Z
2021-04-30T00:47:01Z
2021-04-30T00:47:01Z
2021-04-30T00:47:25Z
CLN: Unused file 'path_to_file.zip'
diff --git a/scripts/path_to_file.zip b/scripts/path_to_file.zip deleted file mode 100644 index ae2957e99dfae..0000000000000 Binary files a/scripts/path_to_file.zip and /dev/null differ
New file [path_to_file.zip](https://github.com/pandas-dev/pandas/blob/master/scripts/path_to_file.zip) was added in #37844. I believe this file is (accidentally?) generated when running [scripts/validate_docstrings.py](https://github.com/pandas-dev/pandas/blob/master/scripts/validate_docstrings.py) and probably is not needed in the repository.
https://api.github.com/repos/pandas-dev/pandas/pulls/37903
2020-11-16T23:07:46Z
2020-11-17T01:06:00Z
2020-11-17T01:06:00Z
2020-11-17T01:10:54Z
TYP: Add cast to ABC classes.
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index ec88eb817b3f8..f7f575ea1c29c 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -60,7 +60,7 @@ from pandas.core.indexers import validate_indices if TYPE_CHECKING: - from pandas import Categorical, DataFrame, Series + from pandas import Categorical, DataFrame, Index, Series _shared_docs: Dict[str, str] = {} @@ -533,7 +533,7 @@ def factorize( sort: bool = False, na_sentinel: Optional[int] = -1, size_hint: Optional[int] = None, -) -> Tuple[np.ndarray, Union[np.ndarray, ABCIndex]]: +) -> Tuple[np.ndarray, Union[np.ndarray, "Index"]]: """ Encode the object as an enumerated type or categorical variable. diff --git a/pandas/core/base.py b/pandas/core/base.py index b3366cca37617..5f724d9e89d05 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -269,12 +269,14 @@ def __getitem__(self, key): return self._gotitem(list(key), ndim=2) elif not getattr(self, "as_index", False): - if key not in self.obj.columns: + # error: "SelectionMixin" has no attribute "obj" [attr-defined] + if key not in self.obj.columns: # type: ignore[attr-defined] raise KeyError(f"Column not found: {key}") return self._gotitem(key, ndim=2) else: - if key not in self.obj: + # error: "SelectionMixin" has no attribute "obj" [attr-defined] + if key not in self.obj: # type: ignore[attr-defined] raise KeyError(f"Column not found: {key}") return self._gotitem(key, ndim=1) @@ -919,10 +921,9 @@ def _map_values(self, mapper, na_action=None): # "astype" [attr-defined] values = self.astype(object)._values # type: ignore[attr-defined] if na_action == "ignore": - - def map_f(values, f): - return lib.map_infer_mask(values, f, isna(values).view(np.uint8)) - + map_f = lambda values, f: lib.map_infer_mask( + values, f, isna(values).view(np.uint8) + ) elif na_action is None: map_f = lib.map_infer else: diff --git a/pandas/core/common.py b/pandas/core/common.py index d5c078b817ca0..09ed2005cd028 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -469,7 +469,8 @@ def convert_to_list_like( inputs are returned unmodified whereas others are converted to list. """ if isinstance(values, (list, np.ndarray, ABCIndex, ABCSeries, ABCExtensionArray)): - return values + # np.ndarray resolving as Any gives a false positive + return values # type: ignore[return-value] elif isinstance(values, abc.Iterable) and not isinstance(values, str): return list(values) diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py index 8a8b0d564ea49..5ad3e78a76866 100644 --- a/pandas/core/computation/align.py +++ b/pandas/core/computation/align.py @@ -1,9 +1,10 @@ """ Core eval alignment algorithms. 
""" +from __future__ import annotations from functools import partial, wraps -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import TYPE_CHECKING, Dict, Optional, Sequence, Tuple, Type, Union import warnings import numpy as np @@ -17,13 +18,16 @@ import pandas.core.common as com from pandas.core.computation.common import result_type_many +if TYPE_CHECKING: + from pandas.core.indexes.api import Index + def _align_core_single_unary_op( term, -) -> Tuple[Union[partial, Type[FrameOrSeries]], Optional[Dict[str, int]]]: +) -> Tuple[Union[partial, Type[FrameOrSeries]], Optional[Dict[str, Index]]]: typ: Union[partial, Type[FrameOrSeries]] - axes: Optional[Dict[str, int]] = None + axes: Optional[Dict[str, Index]] = None if isinstance(term.value, np.ndarray): typ = partial(np.asanyarray, dtype=term.value.dtype) @@ -36,8 +40,8 @@ def _align_core_single_unary_op( def _zip_axes_from_type( - typ: Type[FrameOrSeries], new_axes: Sequence[int] -) -> Dict[str, int]: + typ: Type[FrameOrSeries], new_axes: Sequence[Index] +) -> Dict[str, Index]: return {name: new_axes[i] for i, name in enumerate(typ._AXIS_ORDERS)} diff --git a/pandas/core/computation/parsing.py b/pandas/core/computation/parsing.py index 86e125b6b909b..a1bebc92046ae 100644 --- a/pandas/core/computation/parsing.py +++ b/pandas/core/computation/parsing.py @@ -8,6 +8,8 @@ import tokenize from typing import Iterator, Tuple +from pandas._typing import Label + # A token value Python's tokenizer probably will never use. BACKTICK_QUOTED_STRING = 100 @@ -91,7 +93,7 @@ def clean_backtick_quoted_toks(tok: Tuple[int, str]) -> Tuple[int, str]: return toknum, tokval -def clean_column_name(name: str) -> str: +def clean_column_name(name: "Label") -> "Label": """ Function to emulate the cleaning of a backtick quoted name. @@ -102,12 +104,12 @@ def clean_column_name(name: str) -> str: Parameters ---------- - name : str + name : hashable Name to be cleaned. Returns ------- - name : str + name : hashable Returns the name after tokenizing and cleaning. Notes diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 7901e150a7ff4..f9ebe3f1e185e 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -351,7 +351,7 @@ def array( return result -def extract_array(obj: AnyArrayLike, extract_numpy: bool = False) -> ArrayLike: +def extract_array(obj: object, extract_numpy: bool = False) -> Union[Any, ArrayLike]: """ Extract the ndarray or ExtensionArray from a Series or Index. 
@@ -399,9 +399,7 @@ def extract_array(obj: AnyArrayLike, extract_numpy: bool = False) -> ArrayLike: if extract_numpy and isinstance(obj, ABCPandasArray): obj = obj.to_numpy() - # error: Incompatible return value type (got "Index", expected "ExtensionArray") - # error: Incompatible return value type (got "Series", expected "ExtensionArray") - return obj # type: ignore[return-value] + return obj def sanitize_array( diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py index 7d2549713c6bc..34891180906bb 100644 --- a/pandas/core/dtypes/generic.py +++ b/pandas/core/dtypes/generic.py @@ -1,4 +1,11 @@ """ define generic base classes for pandas objects """ +from __future__ import annotations + +from typing import TYPE_CHECKING, Type, cast + +if TYPE_CHECKING: + from pandas import DataFrame, Series + from pandas.core.generic import NDFrame # define abstract base classes to enable isinstance type checking on our @@ -53,9 +60,17 @@ def _check(cls, inst) -> bool: }, ) -ABCNDFrame = create_pandas_abc_type("ABCNDFrame", "_typ", ("series", "dataframe")) -ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series",)) -ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",)) +ABCNDFrame = cast( + "Type[NDFrame]", + create_pandas_abc_type("ABCNDFrame", "_typ", ("series", "dataframe")), +) +ABCSeries = cast( + "Type[Series]", + create_pandas_abc_type("ABCSeries", "_typ", ("series",)), +) +ABCDataFrame = cast( + "Type[DataFrame]", create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",)) +) ABCCategorical = create_pandas_abc_type("ABCCategorical", "_typ", ("categorical")) ABCDatetimeArray = create_pandas_abc_type("ABCDatetimeArray", "_typ", ("datetimearray")) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 4e6aba1961b64..fee143816164d 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -511,7 +511,7 @@ def _get_axis_resolvers(self, axis: str) -> Dict[str, Union[Series, MultiIndex]] return d @final - def _get_index_resolvers(self) -> Dict[str, Union[Series, MultiIndex]]: + def _get_index_resolvers(self) -> Dict[Label, Union[Series, MultiIndex]]: from pandas.core.computation.parsing import clean_column_name d: Dict[str, Union[Series, MultiIndex]] = {} @@ -521,7 +521,7 @@ def _get_index_resolvers(self) -> Dict[str, Union[Series, MultiIndex]]: return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)} @final - def _get_cleaned_column_resolvers(self) -> Dict[str, ABCSeries]: + def _get_cleaned_column_resolvers(self) -> Dict[Label, Series]: """ Return the special character free column resolvers of a dataframe. 
@@ -532,7 +532,6 @@ def _get_cleaned_column_resolvers(self) -> Dict[str, ABCSeries]: from pandas.core.computation.parsing import clean_column_name if isinstance(self, ABCSeries): - self = cast("Series", self) return {clean_column_name(self.name): self} return { diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index a8951e342e0da..47283375beaa3 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -2016,7 +2016,7 @@ def ravel(i): raise ValueError("Incompatible indexer with Series") - def _align_frame(self, indexer, df: ABCDataFrame): + def _align_frame(self, indexer, df: "DataFrame"): is_frame = self.ndim == 2 if isinstance(indexer, tuple): diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index bcafa2c2fdca7..18c5452d9d6cf 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -369,7 +369,7 @@ def extract_index(data) -> Index: index = Index([]) elif len(data) > 0: raw_lengths = [] - indexes = [] + indexes: List[Union[List[Label], Index]] = [] have_raw_arrays = False have_series = False diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 77b1076920f20..a05c4270ff0c3 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -3,11 +3,21 @@ """ from collections import abc -from typing import TYPE_CHECKING, Iterable, List, Mapping, Type, Union, cast, overload +from typing import ( + TYPE_CHECKING, + Iterable, + List, + Mapping, + Optional, + Type, + Union, + cast, + overload, +) import numpy as np -from pandas._typing import FrameOrSeries, FrameOrSeriesUnion, Label +from pandas._typing import FrameOrSeriesUnion, Label from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries @@ -295,7 +305,7 @@ class _Concatenator: def __init__( self, - objs: Union[Iterable[FrameOrSeries], Mapping[Label, FrameOrSeries]], + objs: Union[Iterable["NDFrame"], Mapping[Label, "NDFrame"]], axis=0, join: str = "outer", keys=None, @@ -366,7 +376,7 @@ def __init__( # get the sample # want the highest ndim that we have, and must be non-empty # unless all objs are empty - sample = None + sample: Optional["NDFrame"] = None if len(ndims) > 1: max_ndim = max(ndims) for obj in objs: @@ -436,6 +446,8 @@ def __init__( # to line up if self._is_frame and axis == 1: name = 0 + # mypy needs to know sample is not an NDFrame + sample = cast("FrameOrSeriesUnion", sample) obj = sample._constructor({name: obj}) self.objs.append(obj) diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index 7d6a2bf1d776d..9d16beba669ca 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -157,11 +157,10 @@ def __init__(self, data): array = data.array self._array = array + self._index = self._name = None if isinstance(data, ABCSeries): self._index = data.index self._name = data.name - else: - self._index = self._name = None # ._values.categories works for both Series/Index self._parent = data._values.categories if self._is_categorical else data diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py index 938f1846230cb..6ebf610587d30 100644 --- a/pandas/core/window/common.py +++ b/pandas/core/window/common.py @@ -1,5 +1,6 @@ """Common utility functions for rolling operations""" from collections import defaultdict +from typing import cast import warnings import numpy as np @@ -109,6 +110,9 @@ def dataframe_from_int_dict(data, frame_template): # set the index and 
reorder if arg2.columns.nlevels > 1: + # mypy needs to know columns is a MultiIndex, Index doesn't + # have levels attribute + arg2.columns = cast(MultiIndex, arg2.columns) result.index = MultiIndex.from_product( arg2.columns.levels + [result_index] ) diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index bec1f48f5e64a..b8fc93d9aba93 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -7,7 +7,7 @@ import matplotlib.ticker as ticker import numpy as np -from pandas._typing import FrameOrSeries +from pandas._typing import FrameOrSeriesUnion from pandas.core.dtypes.common import is_list_like from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries @@ -30,7 +30,9 @@ def format_date_labels(ax: "Axes", rot): fig.subplots_adjust(bottom=0.2) -def table(ax, data: FrameOrSeries, rowLabels=None, colLabels=None, **kwargs) -> "Table": +def table( + ax, data: FrameOrSeriesUnion, rowLabels=None, colLabels=None, **kwargs +) -> "Table": if isinstance(data, ABCSeries): data = data.to_frame() elif isinstance(data, ABCDataFrame):
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry

Implements the idea of @simonjayhawkins from https://github.com/pandas-dev/pandas/issues/27353#issuecomment-526391462 to use cast on ABC classes. Currently only implements ABCDataFrame, ABCNDFrame, and ABCSeries. Previously, mypy would resolve e.g. ABCSeries as `Any`; now it gets resolved as `Series`.

One issue remains: I don't understand why the code below generates this complaint.

> pandas/core/common.py:472: error: Incompatible return value type (got "Union[Any, List[Any], Series]", expected "Union[List[Any], ExtensionArray]") [return-value]

```
def convert_to_list_like(
    values: Union[Scalar, Iterable, AnyArrayLike]
) -> Union[List, AnyArrayLike]:
    if isinstance(values, (list, np.ndarray, ABCIndex, ABCSeries, ABCExtensionArray)):
        return values
    ...
```

One can add `Series` to the return `Union` to resolve this, but it doesn't seem like that should be necessary.

I'm also getting:

> pandas/core/base.py:923: note: Internal mypy error checking function redefinition

on the lines

```
def map_f(values, f):
    return lib.map_infer_mask(values, f, isna(values).view(np.uint8))
```

where `map_f` is defined in various ways in if-else blocks. I'm not sure if this is a mypy bug.
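For illustration, a minimal sketch of the mechanism with a made-up `Series` stand-in (not pandas code): once the registration-based ABC alias is typed as the concrete class, mypy narrows `isinstance` checks, which is what makes the removed `cast("Series", self)` redundant.

```python
from typing import Type, cast


class Series:
    name = "example"


# Stand-in for the dynamically created ABC; the cast to Type[Series] is
# what tells mypy that isinstance() checks against it narrow to Series.
ABCSeries = cast(Type[Series], Series)


def get_name(obj: object) -> object:
    if isinstance(obj, ABCSeries):
        # obj is narrowed to Series here, so no explicit
        # cast("Series", obj) is needed to access .name.
        return obj.name
    return None
```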
https://api.github.com/repos/pandas-dev/pandas/pulls/37902
2020-11-16T23:00:05Z
2020-11-24T13:30:34Z
2020-11-24T13:30:34Z
2020-11-24T16:55:18Z
BUG: obj.loc[listlike] with missing keys and CategoricalIndex
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 3249c4bb9e0ef..0a888473a1069 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -587,6 +587,7 @@ Indexing - Bug in indexing on a :class:`Series` or :class:`DataFrame` with a :class:`CategoricalIndex` using a listlike indexer containing NA values (:issue:`37722`) - Bug in :meth:`DataFrame.xs` ignored ``droplevel=False`` for columns (:issue:`19056`) - Bug in :meth:`DataFrame.reindex` raising ``IndexingError`` wrongly for empty :class:`DataFrame` with ``tolerance`` not None or ``method="nearest"`` (:issue:`27315`) +- Bug in indexing on a :class:`Series` or :class:`DataFrame` with a :class:`CategoricalIndex` using listlike indexer that contains elements that are in the index's ``categories`` but not in the index itself failing to raise ``KeyError`` (:issue:`37901`) Missing ^^^^^^^ diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index e67ece36b55a5..62c6a6fa7b513 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -576,23 +576,11 @@ def _convert_list_indexer(self, keyarr): # the categories if self.categories._defer_to_indexing: + # See tests.indexing.interval.test_interval:test_loc_getitem_frame indexer = self.categories._convert_list_indexer(keyarr) return Index(self.codes).get_indexer_for(indexer) - msg = "a list-indexer must only include values that are in the categories" - if self.hasnans: - msg += " or NA" - try: - codes = self._data._validate_setitem_value(keyarr) - except (ValueError, TypeError) as err: - if "Index data must be 1-dimensional" in str(err): - # e.g. test_setitem_ndarray_3d - raise - raise KeyError(msg) - if not self.hasnans and (codes == -1).any(): - raise KeyError(msg) - - return self.get_indexer(keyarr) + return self.get_indexer_for(keyarr) @doc(Index._maybe_cast_slice_bound) def _maybe_cast_slice_bound(self, label, side: str, kind): diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index e308fee5fc808..62b1554246e26 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1246,9 +1246,7 @@ def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False): indexer, keyarr = ax._convert_listlike_indexer(key) # We only act on all found values: if indexer is not None and (indexer != -1).all(): - self._validate_read_indexer( - keyarr, indexer, axis, raise_missing=raise_missing - ) + # _validate_read_indexer is a no-op if no -1s, so skip return ax[indexer], indexer if ax._index_as_unique: @@ -1309,21 +1307,15 @@ def _validate_read_indexer( not_found = list(set(key) - set(ax)) raise KeyError(f"{not_found} not in index") - # we skip the warning on Categorical - # as this check is actually done (check for - # non-missing values), but a bit later in the - # code, so we want to avoid warning & then - # just raising - if not ax.is_categorical(): - not_found = key[missing_mask] - - with option_context("display.max_seq_items", 10, "display.width", 80): - raise KeyError( - "Passing list-likes to .loc or [] with any missing labels " - "is no longer supported. " - f"The following labels were missing: {not_found}. " - "See https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike" # noqa:E501 - ) + not_found = key[missing_mask] + + with option_context("display.max_seq_items", 10, "display.width", 80): + raise KeyError( + "Passing list-likes to .loc or [] with any missing labels " + "is no longer supported. 
" + f"The following labels were missing: {not_found}. " + "See https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike" # noqa:E501 + ) @doc(IndexingMixin.iloc) diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py index 9b9ece68b887e..94fc3960f24c5 100644 --- a/pandas/tests/indexing/test_categorical.py +++ b/pandas/tests/indexing/test_categorical.py @@ -1,3 +1,5 @@ +import re + import numpy as np import pytest @@ -254,23 +256,38 @@ def test_slicing_doc_examples(self): ) tm.assert_frame_equal(result, expected) - def test_loc_listlike(self): - + def test_loc_getitem_listlike_labels(self): # list of labels result = self.df.loc[["c", "a"]] expected = self.df.iloc[[4, 0, 1, 5]] tm.assert_frame_equal(result, expected, check_index_type=True) - result = self.df2.loc[["a", "b", "e"]] - exp_index = CategoricalIndex(list("aaabbe"), categories=list("cabe"), name="B") - expected = DataFrame({"A": [0, 1, 5, 2, 3, np.nan]}, index=exp_index) - tm.assert_frame_equal(result, expected, check_index_type=True) + def test_loc_getitem_listlike_unused_category(self): + # GH#37901 a label that is in index.categories but not in index + # listlike containing an element in the categories but not in the values + msg = ( + "The following labels were missing: CategoricalIndex(['e'], " + "categories=['c', 'a', 'b', 'e'], ordered=False, name='B', " + "dtype='category')" + ) + with pytest.raises(KeyError, match=re.escape(msg)): + self.df2.loc[["a", "b", "e"]] + def test_loc_getitem_label_unused_category(self): # element in the categories but not in the values with pytest.raises(KeyError, match=r"^'e'$"): self.df2.loc["e"] - # assign is ok + def test_loc_getitem_non_category(self): + # not all labels in the categories + msg = ( + "The following labels were missing: Index(['d'], dtype='object', name='B')" + ) + with pytest.raises(KeyError, match=re.escape(msg)): + self.df2.loc[["a", "d"]] + + def test_loc_setitem_expansion_label_unused_category(self): + # assigning with a label that is in the categories but not in the index df = self.df2.copy() df.loc["e"] = 20 result = df.loc[["a", "b", "e"]] @@ -278,17 +295,6 @@ def test_loc_listlike(self): expected = DataFrame({"A": [0, 1, 5, 2, 3, 20]}, index=exp_index) tm.assert_frame_equal(result, expected) - df = self.df2.copy() - result = df.loc[["a", "b", "e"]] - exp_index = CategoricalIndex(list("aaabbe"), categories=list("cabe"), name="B") - expected = DataFrame({"A": [0, 1, 5, 2, 3, np.nan]}, index=exp_index) - tm.assert_frame_equal(result, expected, check_index_type=True) - - # not all labels in the categories - msg = "a list-indexer must only include values that are in the categories" - with pytest.raises(KeyError, match=msg): - self.df2.loc[["a", "d"]] - def test_loc_listlike_dtypes(self): # GH 11586 @@ -309,8 +315,8 @@ def test_loc_listlike_dtypes(self): exp = DataFrame({"A": [1, 1, 2], "B": [4, 4, 5]}, index=exp_index) tm.assert_frame_equal(res, exp, check_index_type=True) - msg = "a list-indexer must only include values that are in the categories" - with pytest.raises(KeyError, match=msg): + msg = "The following labels were missing: Index(['x'], dtype='object')" + with pytest.raises(KeyError, match=re.escape(msg)): df.loc[["a", "x"]] # duplicated categories and codes @@ -332,8 +338,7 @@ def test_loc_listlike_dtypes(self): ) tm.assert_frame_equal(res, exp, check_index_type=True) - msg = "a list-indexer must only include values that are in the categories" - with pytest.raises(KeyError, 
match=msg): + with pytest.raises(KeyError, match=re.escape(msg)): df.loc[["a", "x"]] # contains unused category @@ -347,13 +352,6 @@ def test_loc_listlike_dtypes(self): ) tm.assert_frame_equal(res, exp, check_index_type=True) - res = df.loc[["a", "e"]] - exp = DataFrame( - {"A": [1, 3, np.nan], "B": [5, 7, np.nan]}, - index=CategoricalIndex(["a", "a", "e"], categories=list("abcde")), - ) - tm.assert_frame_equal(res, exp, check_index_type=True) - # duplicated slice res = df.loc[["a", "a", "b"]] exp = DataFrame( @@ -362,10 +360,27 @@ def test_loc_listlike_dtypes(self): ) tm.assert_frame_equal(res, exp, check_index_type=True) - msg = "a list-indexer must only include values that are in the categories" - with pytest.raises(KeyError, match=msg): + with pytest.raises(KeyError, match=re.escape(msg)): df.loc[["a", "x"]] + def test_loc_getitem_listlike_unused_category_raises_keyerro(self): + # key that is an *unused* category raises + index = CategoricalIndex(["a", "b", "a", "c"], categories=list("abcde")) + df = DataFrame({"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]}, index=index) + + with pytest.raises(KeyError, match="e"): + # For comparison, check the scalar behavior + df.loc["e"] + + msg = ( + "Passing list-likes to .loc or [] with any missing labels is no " + "longer supported. The following labels were missing: " + "CategoricalIndex(['e'], categories=['a', 'b', 'c', 'd', 'e'], " + "ordered=False, dtype='category'). See https" + ) + with pytest.raises(KeyError, match=re.escape(msg)): + df.loc[["a", "e"]] + def test_ix_categorical_index(self): # GH 12531 df = DataFrame(np.random.randn(3, 3), index=list("ABC"), columns=list("XYZ")) diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index b45eddc3ac49c..28846bcf2f14d 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -1674,7 +1674,12 @@ def test_loc_getitem_list_of_labels_categoricalindex_with_na(self, box): ser2 = ser[:-1] ci2 = ci[1:] # but if there are no NAs present, this should raise KeyError - msg = "a list-indexer must only include values that are in the categories" + msg = ( + r"Passing list-likes to .loc or \[\] with any missing labels is no " + "longer supported. The following labels were missing: " + r"(Categorical)?Index\(\[nan\], .*\). " + "See https" + ) with pytest.raises(KeyError, match=msg): ser2.loc[box(ci2)]
- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

This behavior was deprecated for all other Index subclasses a few years ago and finally removed in 1.0. AFAICT not removing it for CategoricalIndex was an oversight.

xref https://github.com/pandas-dev/pandas/commit/8ab61851eeae0d16deaa2f6eafcf59393b06aaeb#r44233424
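A short reproducer of the fixed behavior, using the same frame as the updated tests:

```python
import pandas as pd

index = pd.CategoricalIndex(["a", "b", "a", "c"], categories=list("abcde"))
df = pd.DataFrame({"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]}, index=index)

df.loc["e"]         # scalar lookup of the unused category already raised KeyError
df.loc[["a", "e"]]  # used to return a reindexed frame with NaN rows;
                    # now raises KeyError like other Index subclasses
```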
https://api.github.com/repos/pandas-dev/pandas/pulls/37901
2020-11-16T22:02:37Z
2020-11-17T12:56:22Z
2020-11-17T12:56:22Z
2020-11-17T15:25:12Z
TYP: __getitem__ method of EA
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 7eaadecbd6491..07862e0b9bb48 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -1,4 +1,6 @@ -from typing import Any, Optional, Sequence, Type, TypeVar +from __future__ import annotations + +from typing import Any, Optional, Sequence, Type, TypeVar, Union import numpy as np @@ -212,7 +214,9 @@ def __setitem__(self, key, value): def _validate_setitem_value(self, value): return value - def __getitem__(self, key): + def __getitem__( + self: NDArrayBackedExtensionArrayT, key: Union[int, slice, np.ndarray] + ) -> Union[NDArrayBackedExtensionArrayT, Any]: if lib.is_integer(key): # fast-path result = self._ndarray[key] diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 0968545a6b8a4..a0a51495791d1 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -6,6 +6,8 @@ This is an experimental API and subject to breaking changes without warning. """ +from __future__ import annotations + import operator from typing import ( Any, @@ -254,8 +256,9 @@ def _from_factorized(cls, values, original): # Must be a Sequence # ------------------------------------------------------------------------ - def __getitem__(self, item): - # type (Any) -> Any + def __getitem__( + self, item: Union[int, slice, np.ndarray] + ) -> Union[ExtensionArray, Any]: """ Select a subset of self. @@ -661,7 +664,7 @@ def dropna(self): """ return self[~self.isna()] - def shift(self, periods: int = 1, fill_value: object = None) -> "ExtensionArray": + def shift(self, periods: int = 1, fill_value: object = None) -> ExtensionArray: """ Shift values by desired number. @@ -831,7 +834,7 @@ def _values_for_factorize(self) -> Tuple[np.ndarray, Any]: """ return self.astype(object), np.nan - def factorize(self, na_sentinel: int = -1) -> Tuple[np.ndarray, "ExtensionArray"]: + def factorize(self, na_sentinel: int = -1) -> Tuple[np.ndarray, ExtensionArray]: """ Encode the extension array as an enumerated type. @@ -940,7 +943,7 @@ def take( *, allow_fill: bool = False, fill_value: Any = None, - ) -> "ExtensionArray": + ) -> ExtensionArray: """ Take elements from an array. @@ -1109,7 +1112,7 @@ def _formatter(self, boxed: bool = False) -> Callable[[Any], Optional[str]]: # Reshaping # ------------------------------------------------------------------------ - def transpose(self, *axes) -> "ExtensionArray": + def transpose(self, *axes) -> ExtensionArray: """ Return a transposed view on this array. @@ -1119,10 +1122,10 @@ def transpose(self, *axes) -> "ExtensionArray": return self[:] @property - def T(self) -> "ExtensionArray": + def T(self) -> ExtensionArray: return self.transpose() - def ravel(self, order="C") -> "ExtensionArray": + def ravel(self, order="C") -> ExtensionArray: """ Return a flattened view on this array. 
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 0ce32fcd822e0..30674212239bf 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from datetime import datetime, timedelta import operator from typing import ( @@ -264,7 +266,9 @@ def __array__(self, dtype=None) -> np.ndarray: return np.array(list(self), dtype=object) return self._ndarray - def __getitem__(self, key): + def __getitem__( + self, key: Union[int, slice, np.ndarray] + ) -> Union[DatetimeLikeArrayMixin, DTScalarOrNaT]: """ This getitem defers to the underlying array, which by-definition can only handle list-likes, slices, and integer scalars diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index a05dc717f83c1..057162fedaa98 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1,5 +1,5 @@ from datetime import datetime, time, timedelta, tzinfo -from typing import Optional, Union +from typing import Optional, Union, cast import warnings import numpy as np @@ -444,9 +444,11 @@ def _generate_range( ) if not left_closed and len(index) and index[0] == start: - index = index[1:] + # TODO: overload DatetimeLikeArrayMixin.__getitem__ + index = cast(DatetimeArray, index[1:]) if not right_closed and len(index) and index[-1] == end: - index = index[:-1] + # TODO: overload DatetimeLikeArrayMixin.__getitem__ + index = cast(DatetimeArray, index[:-1]) dtype = tz_to_dtype(tz) return cls._simple_new(index.asi8, freq=freq, dtype=dtype) diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index a4b88427ceb05..caed932cd7857 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -1,4 +1,6 @@ -from typing import TYPE_CHECKING, Optional, Sequence, Tuple, Type, TypeVar +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Optional, Sequence, Tuple, Type, TypeVar, Union import numpy as np @@ -56,7 +58,7 @@ def itemsize(self) -> int: return self.numpy_dtype.itemsize @classmethod - def construct_array_type(cls) -> Type["BaseMaskedArray"]: + def construct_array_type(cls) -> Type[BaseMaskedArray]: """ Return the array type associated with this dtype. 
@@ -100,7 +102,9 @@ def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False): def dtype(self) -> BaseMaskedDtype: raise AbstractMethodError(self) - def __getitem__(self, item): + def __getitem__( + self, item: Union[int, slice, np.ndarray] + ) -> Union[BaseMaskedArray, Any]: if is_integer(item): if self._mask[item]: return self.dtype.na_value diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index ec96a0d502d3f..ad8f212aa20ea 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1671,10 +1671,10 @@ def first(self, numeric_only: bool = False, min_count: int = -1): def first_compat(obj: FrameOrSeries, axis: int = 0): def first(x: Series): """Helper function for first item that isn't NA.""" - x = x.array[notna(x.array)] - if len(x) == 0: + arr = x.array[notna(x.array)] + if not len(arr): return np.nan - return x[0] + return arr[0] if isinstance(obj, DataFrame): return obj.apply(first, axis=axis) @@ -1695,10 +1695,10 @@ def last(self, numeric_only: bool = False, min_count: int = -1): def last_compat(obj: FrameOrSeries, axis: int = 0): def last(x: Series): """Helper function for last item that isn't NA.""" - x = x.array[notna(x.array)] - if len(x) == 0: + arr = x.array[notna(x.array)] + if not len(arr): return np.nan - return x[-1] + return arr[-1] if isinstance(obj, DataFrame): return obj.apply(last, axis=axis) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index cb5641a74e60b..1160f374643c7 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3212,8 +3212,14 @@ def _get_nearest_indexer(self, target: "Index", limit, tolerance) -> np.ndarray: right_indexer = self.get_indexer(target, "backfill", limit=limit) target_values = target._values - left_distances = np.abs(self._values[left_indexer] - target_values) - right_distances = np.abs(self._values[right_indexer] - target_values) + # error: Unsupported left operand type for - ("ExtensionArray") + left_distances = np.abs( + self._values[left_indexer] - target_values # type: ignore[operator] + ) + # error: Unsupported left operand type for - ("ExtensionArray") + right_distances = np.abs( + self._values[right_indexer] - target_values # type: ignore[operator] + ) op = operator.lt if self.is_monotonic_increasing else operator.le indexer = np.where( @@ -3232,7 +3238,8 @@ def _filter_indexer_tolerance( indexer: np.ndarray, tolerance, ) -> np.ndarray: - distance = abs(self._values[indexer] - target) + # error: Unsupported left operand type for - ("ExtensionArray") + distance = abs(self._values[indexer] - target) # type: ignore[operator] indexer = np.where(distance <= tolerance, indexer, -1) return indexer @@ -3436,6 +3443,7 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None): target = ensure_has_len(target) # target may be an iterator if not isinstance(target, Index) and len(target) == 0: + values: Union[range, ExtensionArray, np.ndarray] if isinstance(self, ABCRangeIndex): values = range(0) else: @@ -4528,8 +4536,9 @@ def asof_locs(self, where: "Index", mask) -> np.ndarray: result = np.arange(len(self))[mask].take(locs) - first = mask.argmax() - result[(locs == 0) & (where._values < self._values[first])] = -1 + # TODO: overload return type of ExtensionArray.__getitem__ + first_value = cast(Any, self._values[mask.argmax()]) + result[(locs == 0) & (where._values < first_value)] = -1 return result diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 
44c20ad0de848..aaccef61fbaba 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -1,5 +1,5 @@ from datetime import datetime, timedelta -from typing import Any +from typing import Any, cast import numpy as np @@ -694,7 +694,10 @@ def difference(self, other, sort=None): if self.equals(other): # pass an empty PeriodArray with the appropriate dtype - return type(self)._simple_new(self._data[:0], name=self.name) + + # TODO: overload DatetimeLikeArrayMixin.__getitem__ + values = cast(PeriodArray, self._data[:0]) + return type(self)._simple_new(values, name=self.name) if is_object_dtype(other): return self.astype(object).difference(other).astype(self.dtype)
As a precursor to https://github.com/pandas-dev/pandas/pull/35259#discussion_r522951940, there are more changes/additions needed, but this PR might get too big to review and push through in a timely manner.

- `overload` is required to avoid casts
- `Any` is used for any type, whereas it should use `object`
- only the abstract classes have been typed
- could create a scalar type alias in each of the EA classes, similar to `DTScalarOrNaT` in `DatetimeLikeArrayMixin`
- import and expose `pandas._libs.missing.NAType` in `pandas._typing`

Any of these could be done here or as a follow-on.
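As a sketch of the overload the in-diff TODO comments point at (a hypothetical follow-on, not part of this PR), it might look something like:

```python
from typing import Any, Union, overload

import numpy as np


class ExtensionArray:
    @overload
    def __getitem__(self, item: int) -> Any:
        ...

    @overload
    def __getitem__(self, item: Union[slice, np.ndarray]) -> "ExtensionArray":
        ...

    def __getitem__(self, item):
        # Scalar positions return a scalar; slices/masks return an array,
        # so mypy could infer e.g. self._data[:0] as an array without casts.
        raise NotImplementedError
```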
https://api.github.com/repos/pandas-dev/pandas/pulls/37898
2020-11-16T18:31:16Z
2020-11-17T12:57:52Z
2020-11-17T12:57:52Z
2020-11-17T19:56:05Z
TST: parametrize test_info
diff --git a/pandas/tests/io/formats/test_info.py b/pandas/tests/io/formats/test_info.py index 8c2155aec7248..329215d41fd41 100644 --- a/pandas/tests/io/formats/test_info.py +++ b/pandas/tests/io/formats/test_info.py @@ -16,39 +16,13 @@ Series, date_range, option_context, - reset_option, - set_option, ) -import pandas._testing as tm @pytest.fixture -def datetime_frame(): - """ - Fixture for DataFrame of floats with DatetimeIndex - - Columns are ['A', 'B', 'C', 'D'] - - A B C D - 2000-01-03 -1.122153 0.468535 0.122226 1.693711 - 2000-01-04 0.189378 0.486100 0.007864 -1.216052 - 2000-01-05 0.041401 -0.835752 -0.035279 -0.414357 - 2000-01-06 0.430050 0.894352 0.090719 0.036939 - 2000-01-07 -0.620982 -0.668211 -0.706153 1.466335 - 2000-01-10 -0.752633 0.328434 -0.815325 0.699674 - 2000-01-11 -2.236969 0.615737 -0.829076 -1.196106 - ... ... ... ... ... - 2000-02-03 1.642618 -0.579288 0.046005 1.385249 - 2000-02-04 -0.544873 -1.160962 -0.284071 -1.418351 - 2000-02-07 -2.656149 -0.601387 1.410148 0.444150 - 2000-02-08 -1.201881 -1.289040 0.772992 -1.445300 - 2000-02-09 1.377373 0.398619 1.008453 -0.928207 - 2000-02-10 0.473194 -0.636677 0.984058 0.511519 - 2000-02-11 -0.965556 0.408313 -1.312844 -0.381948 - - [30 rows x 4 columns] - """ - return DataFrame(tm.getTimeSeriesData()) +def duplicate_columns_frame(): + """Dataframe with duplicate column names.""" + return DataFrame(np.random.randn(1500, 4), columns=["a", "a", "b", "b"]) def test_info_empty(): @@ -65,9 +39,7 @@ def test_info_empty(): assert result == expected -def test_info_categorical_column(): - - # make sure it works +def test_info_categorical_column_smoke_test(): n = 2500 df = DataFrame({"int64": np.random.randint(100, size=n)}) df["category"] = Series( @@ -82,18 +54,48 @@ def test_info_categorical_column(): df2.info(buf=buf) -def test_info(float_frame, datetime_frame): - io = StringIO() - float_frame.info(buf=io) - datetime_frame.info(buf=io) +@pytest.mark.parametrize( + "fixture_func_name", + [ + "int_frame", + "float_frame", + "datetime_frame", + "duplicate_columns_frame", + ], +) +def test_info_smoke_test(fixture_func_name, request): + frame = request.getfixturevalue(fixture_func_name) + buf = StringIO() + frame.info(buf=buf) + result = buf.getvalue().splitlines() + assert len(result) > 10 - frame = DataFrame(np.random.randn(5, 3)) - frame.info() - frame.info(verbose=False) +@pytest.mark.parametrize( + "num_columns, max_info_columns, verbose", + [ + (10, 100, True), + (10, 11, True), + (10, 10, True), + (10, 9, False), + (10, 1, False), + ], +) +def test_info_default_verbose_selection(num_columns, max_info_columns, verbose): + frame = DataFrame(np.random.randn(5, num_columns)) + with option_context("display.max_info_columns", max_info_columns): + io_default = StringIO() + frame.info(buf=io_default) + result = io_default.getvalue() + io_explicit = StringIO() + frame.info(buf=io_explicit, verbose=verbose) + expected = io_explicit.getvalue() -def test_info_verbose(): + assert result == expected + + +def test_info_verbose_check_header_separator_body(): buf = StringIO() size = 1001 start = 5 @@ -202,33 +204,23 @@ def test_info_wide(): io = StringIO() df.info(buf=io, max_cols=101) - rs = io.getvalue() - assert len(rs.splitlines()) > 100 - xp = rs - - set_option("display.max_info_columns", 101) - io = StringIO() - df.info(buf=io) - assert rs == xp - reset_option("display.max_info_columns") - + result = io.getvalue() + assert len(result.splitlines()) > 100 -def test_info_duplicate_columns(): - io = StringIO() - - # it works! 
- frame = DataFrame(np.random.randn(1500, 4), columns=["a", "a", "b", "b"]) - frame.info(buf=io) + expected = result + with option_context("display.max_info_columns", 101): + io = StringIO() + df.info(buf=io) + result = io.getvalue() + assert result == expected def test_info_duplicate_columns_shows_correct_dtypes(): # GH11761 io = StringIO() - frame = DataFrame([[1, 2.0]], columns=["a", "a"]) frame.info(buf=io) - io.seek(0) - lines = io.readlines() + lines = io.getvalue().splitlines(True) assert " 0 a 1 non-null int64 \n" == lines[5] assert " 1 a 1 non-null float64\n" == lines[6] @@ -272,7 +264,6 @@ def test_info_max_cols(): assert len(res.strip().split("\n")) == len_ for len_, verbose in [(12, None), (5, False), (12, True)]: - # max_cols not exceeded with option_context("max_info_columns", 5): buf = StringIO() @@ -418,7 +409,6 @@ def test_usage_via_getsizeof(): def test_info_memory_usage_qualified(): - buf = StringIO() df = DataFrame(1, columns=list("ab"), index=[1, 2, 3]) df.info(buf=buf) @@ -454,7 +444,8 @@ def memory_usage(f): N = 100 M = len(uppercase) index = MultiIndex.from_product( - [list(uppercase), date_range("20160101", periods=N)], names=["id", "date"] + [list(uppercase), date_range("20160101", periods=N)], + names=["id", "date"], ) df = DataFrame({"value": np.random.randn(N * M)}, index=index)
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry

Precursor for #37320
- Parametrize tests
- Rename some of the test functions
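The new `test_info_smoke_test` resolves fixtures by name via `request.getfixturevalue`; the pattern in isolation:

```python
import pandas as pd
import pytest


@pytest.fixture
def small_frame():
    return pd.DataFrame({"a": [1.0, 2.0]})


@pytest.mark.parametrize("fixture_name", ["small_frame"])
def test_smoke(fixture_name, request):
    # Look up the fixture by its name so one parametrized test can
    # exercise several differently constructed frames.
    frame = request.getfixturevalue(fixture_name)
    assert len(frame) > 0
```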
https://api.github.com/repos/pandas-dev/pandas/pulls/37887
2020-11-16T06:09:56Z
2020-11-25T22:35:38Z
2020-11-25T22:35:38Z
2020-12-03T07:56:54Z
REF: Use more memory views in rolling aggregations
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx index 4de7a5860c465..c1b5adab654cb 100644 --- a/pandas/_libs/window/aggregations.pyx +++ b/pandas/_libs/window/aggregations.pyx @@ -136,7 +136,7 @@ cdef inline void remove_sum(float64_t val, int64_t *nobs, float64_t *sum_x, sum_x[0] = t -def roll_sum(ndarray[float64_t] values, ndarray[int64_t] start, +def roll_sum(const float64_t[:] values, ndarray[int64_t] start, ndarray[int64_t] end, int64_t minp): cdef: float64_t sum_x = 0, compensation_add = 0, compensation_remove = 0 @@ -240,7 +240,7 @@ cdef inline void remove_mean(float64_t val, Py_ssize_t *nobs, float64_t *sum_x, neg_ct[0] = neg_ct[0] - 1 -def roll_mean(ndarray[float64_t] values, ndarray[int64_t] start, +def roll_mean(const float64_t[:] values, ndarray[int64_t] start, ndarray[int64_t] end, int64_t minp): cdef: float64_t val, compensation_add = 0, compensation_remove = 0, sum_x = 0 @@ -361,7 +361,7 @@ cdef inline void remove_var(float64_t val, float64_t *nobs, float64_t *mean_x, ssqdm_x[0] = 0 -def roll_var(ndarray[float64_t] values, ndarray[int64_t] start, +def roll_var(const float64_t[:] values, ndarray[int64_t] start, ndarray[int64_t] end, int64_t minp, int ddof=1): """ Numerically stable implementation using Welford's method. @@ -772,7 +772,7 @@ def roll_kurt(ndarray[float64_t] values, ndarray[int64_t] start, # Rolling median, min, max -def roll_median_c(ndarray[float64_t] values, ndarray[int64_t] start, +def roll_median_c(const float64_t[:] values, ndarray[int64_t] start, ndarray[int64_t] end, int64_t minp): # GH 32865. win argument kept for compatibility cdef: @@ -1032,7 +1032,7 @@ interpolation_types = { } -def roll_quantile(ndarray[float64_t, cast=True] values, ndarray[int64_t] start, +def roll_quantile(const float64_t[:] values, ndarray[int64_t] start, ndarray[int64_t] end, int64_t minp, float64_t quantile, str interpolation): """
- [x] tests added / passed
https://api.github.com/repos/pandas-dev/pandas/pulls/37886
2020-11-16T03:02:32Z
2020-11-16T17:02:17Z
2020-11-16T17:02:17Z
2020-11-16T17:02:48Z
REF: Index._intersection
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index cb5641a74e60b..3243fcf0742a0 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2798,6 +2798,13 @@ def intersection(self, other, sort=False): other = other.astype("O") return this.intersection(other, sort=sort) + result = self._intersection(other, sort=sort) + return self._wrap_setop_result(other, result) + + def _intersection(self, other, sort=False): + """ + intersection specialized to the case with matching dtypes. + """ # TODO(EA): setops-refactor, clean all this up lvals = self._values rvals = other._values @@ -2808,7 +2815,7 @@ def intersection(self, other, sort=False): except TypeError: pass else: - return self._wrap_setop_result(other, result) + return result try: indexer = Index(rvals).get_indexer(lvals) @@ -2824,7 +2831,7 @@ def intersection(self, other, sort=False): if sort is None: result = algos.safe_sort(result) - return self._wrap_setop_result(other, result) + return result def difference(self, other, sort=None): """
Follow the same pattern as Index._union. Motivated by trying to get stricter typing in CategoricalIndex._shallow_copy, which is blocked on pinning down the return types of the set ops.
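Schematically (simplified from the diff; the dtype-mismatch handling in `intersection` is elided):

```python
class Index:
    def intersection(self, other, sort=False):
        # Public entry point: after any dtype reconciliation (not shown),
        # delegate the dtype-matched work and wrap the result.
        result = self._intersection(other, sort=sort)
        return self._wrap_setop_result(other, result)

    def _intersection(self, other, sort=False):
        # Core set logic specialized to matching dtypes; subclasses
        # override just this part, mirroring Index._union.
        ...
```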
https://api.github.com/repos/pandas-dev/pandas/pulls/37885
2020-11-16T02:51:39Z
2020-11-17T01:16:31Z
2020-11-17T01:16:31Z
2020-11-17T01:26:47Z
REF: make casting explicit in CategoricalIndex
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 06df8f85cded7..d2ec2bcfeb470 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -491,6 +491,7 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None): # in which case we are going to conform to the passed Categorical new_target = np.asarray(new_target) if is_categorical_dtype(target): + new_target = Categorical(new_target, dtype=target.dtype) new_target = target._shallow_copy(new_target, name=self.name) else: new_target = Index(new_target, name=self.name) @@ -514,6 +515,7 @@ def _reindex_non_unique(self, target): if not (cats == -1).any(): # .reindex returns normal Index. Revert to CategoricalIndex if # all targets are included in my categories + new_target = Categorical(new_target, dtype=self.dtype) new_target = self._shallow_copy(new_target) return new_target, indexer, new_indexer
Motivated by trying to get stricter typing in CategoricalIndex._shallow_copy.
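In effect the patch builds the `Categorical` explicitly with the target's dtype before `_shallow_copy`, rather than relying on implicit coercion; roughly:

```python
import numpy as np
import pandas as pd

target = pd.CategoricalIndex(["a", "b"], categories=list("abc"))
new_target = np.asarray(["a", "a"])

# Explicit cast to the target's categorical dtype up front, instead of
# passing the raw ndarray through and letting _shallow_copy coerce it.
cat = pd.Categorical(new_target, dtype=target.dtype)
```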
https://api.github.com/repos/pandas-dev/pandas/pulls/37884
2020-11-16T02:49:43Z
2020-11-17T01:28:53Z
2020-11-17T01:28:53Z
2020-11-17T01:32:52Z
REF: simplify setitem_with_indexer
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index a8951e342e0da..e308fee5fc808 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1647,6 +1647,8 @@ def _setitem_with_indexer_split_path(self, indexer, value): indexer = _tuplify(self.ndim, indexer) if len(indexer) > self.ndim: raise IndexError("too many indices for array") + if isinstance(indexer[0], np.ndarray) and indexer[0].ndim > 2: + raise ValueError(r"Cannot set values with ndim > 2") if isinstance(value, ABCSeries): value = self._align_series(indexer, value) @@ -1658,57 +1660,45 @@ def _setitem_with_indexer_split_path(self, indexer, value): lplane_indexer = length_of_indexer(pi, self.obj.index) # lplane_indexer gives the expected length of obj[indexer[0]] - if len(ilocs) == 1: - # We can operate on a single column - - # require that we are setting the right number of values that - # we are indexing - if is_list_like_indexer(value) and 0 != lplane_indexer != len(value): - # Exclude zero-len for e.g. boolean masking that is all-false - raise ValueError( - "cannot set using a multi-index " - "selection indexer with a different " - "length than the value" - ) - # we need an iterable, with a ndim of at least 1 # eg. don't pass through np.array(0) if is_list_like_indexer(value) and getattr(value, "ndim", 1) > 0: - # we have an equal len Frame if isinstance(value, ABCDataFrame): self._setitem_with_indexer_frame_value(indexer, value) - # we have an equal len ndarray/convertible to our ilocs - # hasattr first, to avoid coercing to ndarray without reason. - # But we may be relying on the ndarray coercion to check ndim. - # Why not just convert to an ndarray earlier on if needed? elif np.ndim(value) == 2: self._setitem_with_indexer_2d_value(indexer, value) elif len(ilocs) == 1 and lplane_indexer == len(value) and not is_scalar(pi): - # we have an equal len list/ndarray - # We only get here with len(ilocs) == 1 + # We are setting multiple rows in a single column. self._setitem_single_column(ilocs[0], value, pi) + elif len(ilocs) == 1 and 0 != lplane_indexer != len(value): + # We are trying to set N values into M entries of a single + # column, which is invalid for N != M + # Exclude zero-len for e.g. boolean masking that is all-false + raise ValueError( + "Must have equal len keys and value " + "when setting with an iterable" + ) + elif lplane_indexer == 0 and len(value) == len(self.obj.index): # We get here in one case via .loc with a all-False mask pass - else: - # per-label values - if len(ilocs) != len(value): - raise ValueError( - "Must have equal len keys and value " - "when setting with an iterable" - ) - + elif len(ilocs) == len(value): + # We are setting multiple columns in a single row. 
for loc, v in zip(ilocs, value): self._setitem_single_column(loc, v, pi) - else: - if isinstance(indexer[0], np.ndarray) and indexer[0].ndim > 2: - raise ValueError(r"Cannot set values with ndim > 2") + else: + raise ValueError( + "Must have equal len keys and value " + "when setting with an iterable" + ) + + else: # scalar value for loc in ilocs: diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py index 543416126f12c..f95eac57e9140 100644 --- a/pandas/tests/indexing/multiindex/test_setitem.py +++ b/pandas/tests/indexing/multiindex/test_setitem.py @@ -203,10 +203,7 @@ def test_multiindex_assignment(self): tm.assert_series_equal(df.loc[4, "c"], exp) # invalid assignments - msg = ( - "cannot set using a multi-index selection indexer " - "with a different length than the value" - ) + msg = "Must have equal len keys and value when setting with an iterable" with pytest.raises(ValueError, match=msg): df.loc[4, "c"] = [0, 1, 2, 3] diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 472b29981e78c..7470d2768590e 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -33,10 +33,7 @@ def test_setitem_ndarray_1d(self): df["bar"] = np.zeros(10, dtype=complex) # invalid - msg = ( - "cannot set using a multi-index selection " - "indexer with a different length than the value" - ) + msg = "Must have equal len keys and value when setting with an iterable" with pytest.raises(ValueError, match=msg): df.loc[df.index[2:5], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
Good news: this is the last pure cleanup before we start actually changing the logic.
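The unified error can be reproduced as in the updated tests:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"bar": np.zeros(10, dtype=complex)})

# Four values into three selected rows of a single column now raises
# ValueError: Must have equal len keys and value when setting with an iterable
df.loc[df.index[2:5], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
```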
https://api.github.com/repos/pandas-dev/pandas/pulls/37881
2020-11-15T23:54:02Z
2020-11-17T01:23:41Z
2020-11-17T01:23:41Z
2020-11-17T01:28:59Z
CI: DataFrame.apply(np.any)
diff --git a/pandas/tests/frame/conftest.py b/pandas/tests/frame/conftest.py index 486d140849159..05ceb2ded71d0 100644 --- a/pandas/tests/frame/conftest.py +++ b/pandas/tests/frame/conftest.py @@ -76,6 +76,11 @@ def bool_frame_with_na(): # set some NAs df.iloc[5:10] = np.nan df.iloc[15:20, -2:] = np.nan + + # For `any` tests we need to have at least one True before the first NaN + # in each column + for i in range(4): + df.iloc[i, i] = True return df
Should fix these failures https://dev.azure.com/pandas-dev/pandas/_build/results?buildId=47831&view=logs&j=f016abb9-7827-5fa2-935a-22bd9b1477b6&t=c86edbe1-8c1d-5e5c-5b6f-d970fa4acf6d
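My reading of the failure mode (an assumption, not taken from the linked logs): `np.nan` is truthy, so a column whose only non-NaN values are `False` answers differently under a raw `np.any` than under pandas' NA-skipping reductions; seeding a `True` before the first NaN makes both paths agree.

```python
import numpy as np
import pandas as pd

bool(np.nan)                       # True: NaN is truthy in Python
pd.Series([False, np.nan]).any()   # False: pandas skips NA by default
```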
https://api.github.com/repos/pandas-dev/pandas/pulls/37879
2020-11-15T22:35:51Z
2020-11-16T00:40:58Z
2020-11-16T00:40:58Z
2020-11-16T00:41:01Z
ENH: Support groupby.ewm operations
diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py index 226b225b47591..79a33c437ea5c 100644 --- a/asv_bench/benchmarks/rolling.py +++ b/asv_bench/benchmarks/rolling.py @@ -225,4 +225,17 @@ def time_rolling_offset(self, method): getattr(self.groupby_roll_offset, method)() +class GroupbyEWM: + + params = ["cython", "numba"] + param_names = ["engine"] + + def setup(self, engine): + df = pd.DataFrame({"A": range(50), "B": range(50)}) + self.gb_ewm = df.groupby("A").ewm(com=1.0) + + def time_groupby_mean(self, engine): + self.gb_ewm.mean(engine=engine) + + from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/doc/source/user_guide/window.rst b/doc/source/user_guide/window.rst index 47ef1e9c8c4d7..05f8be091fa25 100644 --- a/doc/source/user_guide/window.rst +++ b/doc/source/user_guide/window.rst @@ -43,7 +43,7 @@ Concept Method Returned Object Rolling window ``rolling`` ``Rolling`` Yes Yes Weighted window ``rolling`` ``Window`` No No Expanding window ``expanding`` ``Expanding`` No Yes -Exponentially Weighted window ``ewm`` ``ExponentialMovingWindow`` No No +Exponentially Weighted window ``ewm`` ``ExponentialMovingWindow`` No Yes (as of version 1.2) ============================= ================= =========================== =========================== ======================== As noted above, some operations support specifying a window based on a time offset: diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 62da3c0c5cddc..15b5485b26402 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -204,6 +204,23 @@ example where the index name is preserved: The same is true for :class:`MultiIndex`, but the logic is applied separately on a level-by-level basis. +.. _whatsnew_120.groupby_ewm: + +Groupby supports EWM operations directly +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:class:`DataFrameGroupBy` now supports exponentially weighted window operations directly (:issue:`16037`). + +.. ipython:: python + + df = pd.DataFrame({'A': ['a', 'b', 'a', 'b'], 'B': range(4)}) + df + df.groupby('A').ewm(com=1.0).mean() + +Additionally ``mean`` supports execution via `Numba <https://numba.pydata.org/>`__ with +the ``engine`` and ``engine_kwargs`` arguments. Numba must be installed as an optional dependency +to use this feature. + .. _whatsnew_120.enhancements.other: Other enhancements diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx index c1b5adab654cb..54a09a6d2ede7 100644 --- a/pandas/_libs/window/aggregations.pyx +++ b/pandas/_libs/window/aggregations.pyx @@ -1496,8 +1496,8 @@ def roll_weighted_var(float64_t[:] values, float64_t[:] weights, # ---------------------------------------------------------------------- # Exponentially weighted moving average -def ewma_time(const float64_t[:] vals, int minp, ndarray[int64_t] times, - int64_t halflife): +def ewma_time(const float64_t[:] vals, int64_t[:] start, int64_t[:] end, + int minp, ndarray[int64_t] times, int64_t halflife): """ Compute exponentially-weighted moving average using halflife and time distances. 
@@ -1505,6 +1505,8 @@ def ewma_time(const float64_t[:] vals, int minp, ndarray[int64_t] times, Parameters ---------- vals : ndarray[float_64] + start: ndarray[int_64] + end: ndarray[int_64] minp : int times : ndarray[int64] halflife : int64 @@ -1552,17 +1554,20 @@ def ewma_time(const float64_t[:] vals, int minp, ndarray[int64_t] times, return output -def ewma(float64_t[:] vals, float64_t com, bint adjust, bint ignore_na, int minp): +def ewma(float64_t[:] vals, int64_t[:] start, int64_t[:] end, int minp, + float64_t com, bint adjust, bint ignore_na): """ Compute exponentially-weighted moving average using center-of-mass. Parameters ---------- vals : ndarray (float64 type) + start: ndarray (int64 type) + end: ndarray (int64 type) + minp : int com : float64 adjust : int ignore_na : bool - minp : int Returns ------- @@ -1620,19 +1625,21 @@ def ewma(float64_t[:] vals, float64_t com, bint adjust, bint ignore_na, int minp # Exponentially weighted moving covariance -def ewmcov(float64_t[:] input_x, float64_t[:] input_y, - float64_t com, bint adjust, bint ignore_na, int minp, bint bias): +def ewmcov(float64_t[:] input_x, int64_t[:] start, int64_t[:] end, int minp, + float64_t[:] input_y, float64_t com, bint adjust, bint ignore_na, bint bias): """ Compute exponentially-weighted moving variance using center-of-mass. Parameters ---------- input_x : ndarray (float64 type) + start: ndarray (int64 type) + end: ndarray (int64 type) + minp : int input_y : ndarray (float64 type) com : float64 adjust : int ignore_na : bool - minp : int bias : int Returns diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py index f205226c03a53..7dc0db35bf8fe 100644 --- a/pandas/core/groupby/base.py +++ b/pandas/core/groupby/base.py @@ -192,6 +192,7 @@ def _gotitem(self, key, ndim, subset=None): "describe", "dtypes", "expanding", + "ewm", "filter", "get_group", "groups", diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index ad8f212aa20ea..6e26e9a43bb2a 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1859,6 +1859,16 @@ def expanding(self, *args, **kwargs): return ExpandingGroupby(self, *args, **kwargs) + @Substitution(name="groupby") + @Appender(_common_see_also) + def ewm(self, *args, **kwargs): + """ + Return an ewm grouper, providing ewm functionality per group. + """ + from pandas.core.window import ExponentialMovingWindowGroupby + + return ExponentialMovingWindowGroupby(self, *args, **kwargs) + def _fill(self, direction, limit=None): """ Shared function for `pad` and `backfill` to call Cython method. 
diff --git a/pandas/core/window/__init__.py b/pandas/core/window/__init__.py index 304c61ac0e489..b3d0820fee4da 100644 --- a/pandas/core/window/__init__.py +++ b/pandas/core/window/__init__.py @@ -1,3 +1,6 @@ -from pandas.core.window.ewm import ExponentialMovingWindow # noqa:F401 +from pandas.core.window.ewm import ( # noqa:F401 + ExponentialMovingWindow, + ExponentialMovingWindowGroupby, +) from pandas.core.window.expanding import Expanding, ExpandingGroupby # noqa:F401 from pandas.core.window.rolling import Rolling, RollingGroupby, Window # noqa:F401 diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index b601bacec35f1..f8237a436f436 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -14,8 +14,20 @@ from pandas.core.dtypes.common import is_datetime64_ns_dtype import pandas.core.common as common -from pandas.core.window.common import _doc_template, _shared_docs, zsqrt -from pandas.core.window.rolling import BaseWindow, flex_binary_moment +from pandas.core.util.numba_ import maybe_use_numba +from pandas.core.window.common import ( + _doc_template, + _shared_docs, + flex_binary_moment, + zsqrt, +) +from pandas.core.window.indexers import ( + BaseIndexer, + ExponentialMovingWindowIndexer, + GroupbyIndexer, +) +from pandas.core.window.numba_ import generate_numba_groupby_ewma_func +from pandas.core.window.rolling import BaseWindow, BaseWindowGroupby, dispatch if TYPE_CHECKING: from pandas import Series @@ -219,14 +231,16 @@ def __init__( ignore_na: bool = False, axis: int = 0, times: Optional[Union[str, np.ndarray, FrameOrSeries]] = None, + **kwargs, ): - self.com: Optional[float] self.obj = obj self.min_periods = max(int(min_periods), 1) self.adjust = adjust self.ignore_na = ignore_na self.axis = axis self.on = None + self.center = False + self.closed = None if times is not None: if isinstance(times, str): times = self._selected_obj[times] @@ -245,7 +259,7 @@ def __init__( if common.count_not_none(com, span, alpha) > 0: self.com = get_center_of_mass(com, span, None, alpha) else: - self.com = None + self.com = 0.0 else: if halflife is not None and isinstance(halflife, (str, datetime.timedelta)): raise ValueError( @@ -260,6 +274,12 @@ def __init__( def _constructor(self): return ExponentialMovingWindow + def _get_window_indexer(self) -> BaseIndexer: + """ + Return an indexer class that will compute the window start and end bounds + """ + return ExponentialMovingWindowIndexer() + _agg_see_also_doc = dedent( """ See Also @@ -299,27 +319,6 @@ def aggregate(self, func, *args, **kwargs): agg = aggregate - def _apply(self, func): - """ - Rolling statistical measure using supplied function. Designed to be - used with passed-in Cython array-based functions. 
- - Parameters - ---------- - func : str/callable to apply - - Returns - ------- - y : same type as input argument - """ - - def homogeneous_func(values: np.ndarray): - if values.size == 0: - return values.copy() - return np.apply_along_axis(func, self.axis, values) - - return self._apply_blockwise(homogeneous_func) - @Substitution(name="ewm", func_name="mean") @Appender(_doc_template) def mean(self, *args, **kwargs): @@ -336,7 +335,6 @@ def mean(self, *args, **kwargs): window_func = self._get_roll_func("ewma_time") window_func = partial( window_func, - minp=self.min_periods, times=self.times, halflife=self.halflife, ) @@ -347,7 +345,6 @@ def mean(self, *args, **kwargs): com=self.com, adjust=self.adjust, ignore_na=self.ignore_na, - minp=self.min_periods, ) return self._apply(window_func) @@ -371,13 +368,19 @@ def var(self, bias: bool = False, *args, **kwargs): Exponential weighted moving variance. """ nv.validate_window_func("var", args, kwargs) + window_func = self._get_roll_func("ewmcov") + window_func = partial( + window_func, + com=self.com, + adjust=self.adjust, + ignore_na=self.ignore_na, + bias=bias, + ) - def f(arg): - return window_aggregations.ewmcov( - arg, arg, self.com, self.adjust, self.ignore_na, self.min_periods, bias - ) + def var_func(values, begin, end, min_periods): + return window_func(values, begin, end, min_periods, values) - return self._apply(f) + return self._apply(var_func) @Substitution(name="ewm", func_name="cov") @Appender(_doc_template) @@ -419,11 +422,13 @@ def _get_cov(X, Y): Y = self._shallow_copy(Y) cov = window_aggregations.ewmcov( X._prep_values(), + np.array([0], dtype=np.int64), + np.array([0], dtype=np.int64), + self.min_periods, Y._prep_values(), self.com, self.adjust, self.ignore_na, - self.min_periods, bias, ) return wrap_result(X, cov) @@ -470,7 +475,15 @@ def _get_corr(X, Y): def _cov(x, y): return window_aggregations.ewmcov( - x, y, self.com, self.adjust, self.ignore_na, self.min_periods, 1 + x, + np.array([0], dtype=np.int64), + np.array([0], dtype=np.int64), + self.min_periods, + y, + self.com, + self.adjust, + self.ignore_na, + 1, ) x_values = X._prep_values() @@ -485,3 +498,78 @@ def _cov(x, y): return flex_binary_moment( self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise) ) + + +class ExponentialMovingWindowGroupby(BaseWindowGroupby, ExponentialMovingWindow): + """ + Provide an exponential moving window groupby implementation. + """ + + def _get_window_indexer(self) -> GroupbyIndexer: + """ + Return an indexer class that will compute the window start and end bounds + + Returns + ------- + GroupbyIndexer + """ + window_indexer = GroupbyIndexer( + groupby_indicies=self._groupby.indices, + window_indexer=ExponentialMovingWindowIndexer, + ) + return window_indexer + + var = dispatch("var", bias=False) + std = dispatch("std", bias=False) + cov = dispatch("cov", other=None, pairwise=None, bias=False) + corr = dispatch("corr", other=None, pairwise=None) + + def mean(self, engine=None, engine_kwargs=None): + """ + Parameters + ---------- + engine : str, default None + * ``'cython'`` : Runs mean through C-extensions from cython. + * ``'numba'`` : Runs mean through JIT compiled code from numba. + Only available when ``raw`` is set to ``True``. + * ``None`` : Defaults to ``'cython'`` or globally setting + ``compute.use_numba`` + + .. 
versionadded:: 1.2.0 + + engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{'nopython': True, 'nogil': False, 'parallel': False}``. + + .. versionadded:: 1.2.0 + + Returns + ------- + Series or DataFrame + Return type is determined by the caller. + """ + if maybe_use_numba(engine): + groupby_ewma_func = generate_numba_groupby_ewma_func( + engine_kwargs, + self.com, + self.adjust, + self.ignore_na, + ) + return self._apply( + groupby_ewma_func, + numba_cache_key=(lambda x: x, "groupby_ewma"), + ) + elif engine in ("cython", None): + if engine_kwargs is not None: + raise ValueError("cython engine does not accept engine_kwargs") + + def f(x): + x = self._shallow_copy(x, groupby=self._groupby) + return x.mean() + + return self._groupby.apply(f) + else: + raise ValueError("engine must be either 'numba' or 'cython'") diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py index a8229257bb7bb..a3b9695d777d9 100644 --- a/pandas/core/window/indexers.py +++ b/pandas/core/window/indexers.py @@ -344,3 +344,18 @@ def get_window_bounds( start = np.concatenate(start_arrays) end = np.concatenate(end_arrays) return start, end + + +class ExponentialMovingWindowIndexer(BaseIndexer): + """Calculate ewm window bounds (the entire window)""" + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: Optional[int] = None, + center: Optional[bool] = None, + closed: Optional[str] = None, + ) -> Tuple[np.ndarray, np.ndarray]: + + return np.array([0], dtype=np.int64), np.array([num_values], dtype=np.int64) diff --git a/pandas/core/window/numba_.py b/pandas/core/window/numba_.py index c4858b6e5a4ab..274586e1745b5 100644 --- a/pandas/core/window/numba_.py +++ b/pandas/core/window/numba_.py @@ -72,3 +72,92 @@ def roll_apply( return result return roll_apply + + +def generate_numba_groupby_ewma_func( + engine_kwargs: Optional[Dict[str, bool]], + com: float, + adjust: bool, + ignore_na: bool, +): + """ + Generate a numba jitted groupby ewma function specified by values + from engine_kwargs. 
+ + Parameters + ---------- + engine_kwargs : dict + dictionary of arguments to be passed into numba.jit + com : float + adjust : bool + ignore_na : bool + + Returns + ------- + Numba function + """ + nopython, nogil, parallel = get_jit_arguments(engine_kwargs) + + cache_key = (lambda x: x, "groupby_ewma") + if cache_key in NUMBA_FUNC_CACHE: + return NUMBA_FUNC_CACHE[cache_key] + + numba = import_optional_dependency("numba") + if parallel: + loop_range = numba.prange + else: + loop_range = range + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def groupby_ewma( + values: np.ndarray, + begin: np.ndarray, + end: np.ndarray, + minimum_periods: int, + ) -> np.ndarray: + result = np.empty(len(values)) + alpha = 1.0 / (1.0 + com) + for i in loop_range(len(begin)): + start = begin[i] + stop = end[i] + window = values[start:stop] + sub_result = np.empty(len(window)) + + old_wt_factor = 1.0 - alpha + new_wt = 1.0 if adjust else alpha + + weighted_avg = window[0] + nobs = int(not np.isnan(weighted_avg)) + sub_result[0] = weighted_avg if nobs >= minimum_periods else np.nan + old_wt = 1.0 + + for j in range(1, len(window)): + cur = window[j] + is_observation = not np.isnan(cur) + nobs += is_observation + if not np.isnan(weighted_avg): + + if is_observation or not ignore_na: + + old_wt *= old_wt_factor + if is_observation: + + # avoid numerical errors on constant series + if weighted_avg != cur: + weighted_avg = ( + (old_wt * weighted_avg) + (new_wt * cur) + ) / (old_wt + new_wt) + if adjust: + old_wt += new_wt + else: + old_wt = 1.0 + elif is_observation: + weighted_avg = cur + + sub_result[j] = weighted_avg if nobs >= minimum_periods else np.nan + + result[start:stop] = sub_result + + return result + + return groupby_ewma diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index f65452cb2f17f..e74ae5311125e 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -405,7 +405,7 @@ def _apply( self, func: Callable[..., Any], name: Optional[str] = None, - use_numba_cache: bool = False, + numba_cache_key: Optional[Tuple[Callable, str]] = None, **kwargs, ): """ @@ -417,9 +417,8 @@ def _apply( ---------- func : callable function to apply name : str, - use_numba_cache : bool - whether to cache a numba compiled function. Only available for numba - enabled methods (so far only apply) + numba_cache_key : tuple + caching key to be used to store a compiled numba func **kwargs additional arguments for rolling function and window function @@ -456,8 +455,8 @@ def calc(x): result = calc(values) result = np.asarray(result) - if use_numba_cache: - NUMBA_FUNC_CACHE[(kwargs["original_func"], "rolling_apply")] = func + if numba_cache_key is not None: + NUMBA_FUNC_CACHE[numba_cache_key] = func return result @@ -715,7 +714,7 @@ def aggregate(self, func, *args, **kwargs): ) -def _dispatch(name: str, *args, **kwargs): +def dispatch(name: str, *args, **kwargs): """ Dispatch to groupby apply. 
""" @@ -746,20 +745,20 @@ def __init__(self, obj, *args, **kwargs): self._groupby.grouper.mutated = True super().__init__(obj, *args, **kwargs) - corr = _dispatch("corr", other=None, pairwise=None) - cov = _dispatch("cov", other=None, pairwise=None) + corr = dispatch("corr", other=None, pairwise=None) + cov = dispatch("cov", other=None, pairwise=None) def _apply( self, func: Callable[..., Any], name: Optional[str] = None, - use_numba_cache: bool = False, + numba_cache_key: Optional[Tuple[Callable, str]] = None, **kwargs, ) -> FrameOrSeries: result = super()._apply( func, name, - use_numba_cache, + numba_cache_key, **kwargs, ) # Reconstruct the resulting MultiIndex from tuples @@ -1038,7 +1037,7 @@ def _apply( self, func: Callable[[np.ndarray, int, int], np.ndarray], name: Optional[str] = None, - use_numba_cache: bool = False, + numba_cache_key: Optional[Tuple[Callable, str]] = None, **kwargs, ): """ @@ -1050,9 +1049,8 @@ def _apply( ---------- func : callable function to apply name : str, - use_numba_cache : bool - whether to cache a numba compiled function. Only available for numba - enabled methods (so far only apply) + use_numba_cache : tuple + unused **kwargs additional arguments for scipy windows if necessary @@ -1292,10 +1290,12 @@ def apply( if not is_bool(raw): raise ValueError("raw parameter must be `True` or `False`") + numba_cache_key = None if maybe_use_numba(engine): if raw is False: raise ValueError("raw must be `True` when using the numba engine") apply_func = generate_numba_apply_func(args, kwargs, func, engine_kwargs) + numba_cache_key = (func, "rolling_apply") elif engine in ("cython", None): if engine_kwargs is not None: raise ValueError("cython engine does not accept engine_kwargs") @@ -1305,10 +1305,7 @@ def apply( return self._apply( apply_func, - use_numba_cache=maybe_use_numba(engine), - original_func=func, - args=args, - kwargs=kwargs, + numba_cache_key=numba_cache_key, ) def _generate_cython_apply_func( diff --git a/pandas/tests/groupby/test_allowlist.py b/pandas/tests/groupby/test_allowlist.py index 4a735fc7bb686..34729c771eac9 100644 --- a/pandas/tests/groupby/test_allowlist.py +++ b/pandas/tests/groupby/test_allowlist.py @@ -329,6 +329,7 @@ def test_tab_completion(mframe): "expanding", "pipe", "sample", + "ewm", } assert results == expected diff --git a/pandas/tests/window/conftest.py b/pandas/tests/window/conftest.py index 1780925202593..a803ce716eb05 100644 --- a/pandas/tests/window/conftest.py +++ b/pandas/tests/window/conftest.py @@ -74,6 +74,18 @@ def nopython(request): return request.param +@pytest.fixture(params=[True, False]) +def adjust(request): + """adjust keyword argument for ewm""" + return request.param + + +@pytest.fixture(params=[True, False]) +def ignore_na(request): + """ignore_na keyword argument for ewm""" + return request.param + + @pytest.fixture( params=[ pytest.param( diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_groupby.py similarity index 91% rename from pandas/tests/window/test_grouper.py rename to pandas/tests/window/test_groupby.py index 65906df819054..c4de112bd6dc0 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_groupby.py @@ -631,3 +631,60 @@ def test_groupby_rolling_index_level_and_column_label(self): ), ) tm.assert_frame_equal(result, expected) + + +class TestEWM: + @pytest.mark.parametrize( + "method, expected_data", + [ + ["mean", [0.0, 0.6666666666666666, 1.4285714285714286, 2.2666666666666666]], + ["std", [np.nan, 0.707107, 0.963624, 1.177164]], + ["var", [np.nan, 
0.5, 0.9285714285714286, 1.3857142857142857]], + ], + ) + def test_methods(self, method, expected_data): + # GH 16037 + df = DataFrame({"A": ["a"] * 4, "B": range(4)}) + result = getattr(df.groupby("A").ewm(com=1.0), method)() + expected = DataFrame( + {"B": expected_data}, + index=MultiIndex.from_tuples( + [ + ("a", 0), + ("a", 1), + ("a", 2), + ("a", 3), + ], + names=["A", None], + ), + ) + tm.assert_frame_equal(result, expected) + + expected = df.groupby("A").apply(lambda x: getattr(x.ewm(com=1.0), method)()) + # There may be a bug in the above statement; not returning the correct index + tm.assert_frame_equal(result.reset_index(drop=True), expected) + + @pytest.mark.parametrize( + "method, expected_data", + [["corr", [np.nan, 1.0, 1.0, 1]], ["cov", [np.nan, 0.5, 0.928571, 1.385714]]], + ) + def test_pairwise_methods(self, method, expected_data): + # GH 16037 + df = DataFrame({"A": ["a"] * 4, "B": range(4)}) + result = getattr(df.groupby("A").ewm(com=1.0), method)() + expected = DataFrame( + {"B": expected_data}, + index=MultiIndex.from_tuples( + [ + ("a", 0, "B"), + ("a", 1, "B"), + ("a", 2, "B"), + ("a", 3, "B"), + ], + names=["A", None, None], + ), + ) + tm.assert_frame_equal(result, expected) + + expected = df.groupby("A").apply(lambda x: getattr(x.ewm(com=1.0), method)()) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py index 35bdb972a7bc0..3dd09bc4b752a 100644 --- a/pandas/tests/window/test_numba.py +++ b/pandas/tests/window/test_numba.py @@ -3,7 +3,7 @@ import pandas.util._test_decorators as td -from pandas import Series, option_context +from pandas import DataFrame, Series, option_context import pandas._testing as tm from pandas.core.util.numba_ import NUMBA_FUNC_CACHE @@ -11,7 +11,7 @@ @td.skip_if_no("numba", "0.46.0") @pytest.mark.filterwarnings("ignore:\\nThe keyword argument") # Filter warnings when parallel=True and the function can't be parallelized by Numba -class TestApply: +class TestRollingApply: @pytest.mark.parametrize("jit", [True, False]) def test_numba_vs_cython(self, jit, nogil, parallel, nopython, center): def f(x, *args): @@ -77,6 +77,31 @@ def func_2(x): tm.assert_series_equal(result, expected) +@td.skip_if_no("numba", "0.46.0") +class TestGroupbyEWMMean: + def test_invalid_engine(self): + df = DataFrame({"A": ["a", "b", "a", "b"], "B": range(4)}) + with pytest.raises(ValueError, match="engine must be either"): + df.groupby("A").ewm(com=1.0).mean(engine="foo") + + def test_invalid_engine_kwargs(self): + df = DataFrame({"A": ["a", "b", "a", "b"], "B": range(4)}) + with pytest.raises(ValueError, match="cython engine does not"): + df.groupby("A").ewm(com=1.0).mean( + engine="cython", engine_kwargs={"nopython": True} + ) + + def test_cython_vs_numba(self, nogil, parallel, nopython, ignore_na, adjust): + df = DataFrame({"A": ["a", "b", "a", "b"], "B": range(4)}) + gb_ewm = df.groupby("A").ewm(com=1.0, adjust=adjust, ignore_na=ignore_na) + + engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} + result = gb_ewm.mean(engine="numba", engine_kwargs=engine_kwargs) + expected = gb_ewm.mean(engine="cython") + + tm.assert_frame_equal(result, expected) + + @td.skip_if_no("numba", "0.46.0") def test_use_global_config(): def f(x):
- [x] closes #16037
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

Additionally enables a Numba engine for `groupby(...).ewm(...).mean(...)`

```
In [1]: df = pd.DataFrame({"A": range(10_000), "B": range(10_000)})

In [2]: gb_ewm = df.groupby("A").ewm(com=1.0)

# first run includes numba compilation (cache) time
In [3]: %timeit -r 1 -n 1 gb_ewm.mean(engine='numba')
1.02 s ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)

In [4]: %timeit gb_ewm.mean(engine='numba')
578 ms ± 28.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)

In [5]: %timeit df.groupby('A').apply(lambda x: x.ewm(com=1.0).mean())
3.43 s ± 208 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)

In [6]: %timeit gb_ewm.mean(engine='cython')
4.19 s ± 204 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
```
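For reference, a minimal usage sketch of the new grouped EWM API, mirroring the tests added in this diff (nothing here goes beyond what those tests exercise; per this PR, `engine`/`engine_kwargs` are wired up for `mean` only):

```python
import pandas as pd

df = pd.DataFrame({"A": ["a", "b", "a", "b"], "B": range(4)})

# Exponentially weighted mean per group; the result is indexed by a
# MultiIndex of (group key, original row label), as in the tests above.
result = df.groupby("A").ewm(com=1.0).mean()

# Optional Numba acceleration (requires numba>=0.46.0); repeat calls reuse
# the compiled kernel via NUMBA_FUNC_CACHE.
fast = df.groupby("A").ewm(com=1.0).mean(
    engine="numba",
    engine_kwargs={"nopython": True, "nogil": False, "parallel": False},
)
```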
https://api.github.com/repos/pandas-dev/pandas/pulls/37878
2020-11-15T22:12:32Z
2020-11-18T00:36:49Z
2020-11-18T00:36:48Z
2020-11-18T01:38:45Z
DEPR: Index.asi8
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index e00b177f2a2fc..23c2a4c734245 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -474,7 +474,8 @@ Deprecations - :class:`Index` methods ``&``, ``|``, and ``^`` behaving as the set operations :meth:`Index.intersection`, :meth:`Index.union`, and :meth:`Index.symmetric_difference`, respectively, are deprecated and in the future will behave as pointwise boolean operations matching :class:`Series` behavior. Use the named set methods instead (:issue:`36758`) - :meth:`Categorical.is_dtype_equal` and :meth:`CategoricalIndex.is_dtype_equal` are deprecated, will be removed in a future version (:issue:`37545`) - :meth:`Series.slice_shift` and :meth:`DataFrame.slice_shift` are deprecated, use :meth:`Series.shift` or :meth:`DataFrame.shift` instead (:issue:`37601`) -- Partial slicing on unordered :class:`DatetimeIndexes` with keys, which are not in Index is deprecated and will be removed in a future version (:issue:`18531`) +- Partial slicing on unordered :class:`DatetimeIndex` with keys, which are not in Index is deprecated and will be removed in a future version (:issue:`18531`) +- Deprecated :meth:`Index.asi8` for :class:`Index` subclasses other than :class:`DatetimeIndex`, :class:`TimedeltaIndex`, and :class:`PeriodIndex` (:issue:`37877`) - The ``inplace`` parameter of :meth:`Categorical.remove_unused_categories` is deprecated and will be removed in a future version (:issue:`37643`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 1b3e4864843f3..5209d83ade309 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -415,6 +415,11 @@ def asi8(self): ndarray An ndarray with int64 dtype. 
""" + warnings.warn( + "Index.asi8 is deprecated and will be removed in a future version", + FutureWarning, + stacklevel=2, + ) return None @classmethod @@ -4738,12 +4743,13 @@ def argsort(self, *args, **kwargs) -> np.ndarray: >>> idx[order] Index(['a', 'b', 'c', 'd'], dtype='object') """ - result = self.asi8 - - if result is None: - result = np.array(self) + if needs_i8_conversion(self.dtype): + # TODO: these do not match the underlying EA argsort methods GH#37863 + return self.asi8.argsort(*args, **kwargs) - return result.argsort(*args, **kwargs) + # This works for either ndarray or EA, is overriden + # by RangeIndex, MultIIndex + return self._data.argsort(*args, **kwargs) @final def get_value(self, series: "Series", key): diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 9eb8a8b719d41..9dacdd6dea9ca 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -1,5 +1,6 @@ import operator from typing import Any +import warnings import numpy as np @@ -266,6 +267,11 @@ def inferred_type(self) -> str: @property def asi8(self) -> np.ndarray: # do not cache or you'll create a memory leak + warnings.warn( + "Index.asi8 is deprecated and will be removed in a future version", + FutureWarning, + stacklevel=2, + ) return self._values.view(self._default_dtype) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index d52a8ae935688..0b0f985697da9 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -6,7 +6,7 @@ from pandas._libs import index as libindex from pandas._libs.tslibs import BaseOffset, Period, Resolution, Tick from pandas._libs.tslibs.parsing import DateParseError, parse_time_string -from pandas._typing import DtypeObj, Label +from pandas._typing import DtypeObj from pandas.errors import InvalidIndexError from pandas.util._decorators import Appender, cache_readonly, doc diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py index 32ca83787c4c1..4af32b219d380 100644 --- a/pandas/core/tools/numeric.py +++ b/pandas/core/tools/numeric.py @@ -10,6 +10,7 @@ is_number, is_numeric_dtype, is_scalar, + needs_i8_conversion, ) from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries @@ -123,8 +124,9 @@ def to_numeric(arg, errors="raise", downcast=None): values = arg.values elif isinstance(arg, ABCIndexClass): is_index = True - values = arg.asi8 - if values is None: + if needs_i8_conversion(arg.dtype): + values = arg.asi8 + else: values = arg.values elif isinstance(arg, (list, tuple)): values = np.array(arg, dtype="O") diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index e74ae5311125e..51a1e2102c273 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -337,6 +337,13 @@ def _get_roll_func(self, func_name: str) -> Callable[..., Any]: ) return window_func + @property + def _index_array(self): + # TODO: why do we get here with e.g. MultiIndex? 
+ if needs_i8_conversion(self._on.dtype): + return self._on.asi8 + return None + def _get_window_indexer(self) -> BaseIndexer: """ Return an indexer class that will compute the window start and end bounds @@ -345,7 +352,7 @@ def _get_window_indexer(self) -> BaseIndexer: return self.window if self.is_freq_type: return VariableWindowIndexer( - index_array=self._on.asi8, window_size=self.window + index_array=self._index_array, window_size=self.window ) return FixedWindowIndexer(window_size=self.window) @@ -2140,7 +2147,7 @@ def _get_window_indexer(self) -> GroupbyIndexer: """ rolling_indexer: Type[BaseIndexer] indexer_kwargs: Optional[Dict[str, Any]] = None - index_array = self._on.asi8 + index_array = self._index_array window = self.window if isinstance(self.window, BaseIndexer): rolling_indexer = type(self.window) diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index d15b560419f6d..a10be99dff076 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -13,7 +13,16 @@ from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion import pandas as pd -from pandas import CategoricalIndex, MultiIndex, RangeIndex +from pandas import ( + CategoricalIndex, + DatetimeIndex, + Int64Index, + MultiIndex, + PeriodIndex, + RangeIndex, + TimedeltaIndex, + UInt64Index, +) import pandas._testing as tm @@ -348,6 +357,18 @@ def test_ravel_deprecation(self, index): with tm.assert_produces_warning(FutureWarning): index.ravel() + def test_asi8_deprecation(self, index): + # GH#37877 + if isinstance( + index, (Int64Index, UInt64Index, DatetimeIndex, TimedeltaIndex, PeriodIndex) + ): + warn = None + else: + warn = FutureWarning + + with tm.assert_produces_warning(warn): + index.asi8 + @pytest.mark.parametrize("na_position", [None, "middle"]) def test_sort_values_invalid_na_position(index_with_missing, na_position):
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
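Since the checklist above doesn't spell out the new behavior, here is a small sketch of the deprecation in action, based on the `base.py` change in this diff (an object-dtype `Index`, where `asi8` now warns and still returns ``None``):

```python
import warnings

import pandas as pd

idx = pd.Index(["a", "b", "c"])  # not datetime-like, so asi8 has no meaning
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    result = idx.asi8  # now emits a FutureWarning
assert result is None
assert any(issubclass(w.category, FutureWarning) for w in caught)
```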
https://api.github.com/repos/pandas-dev/pandas/pulls/37877
2020-11-15T22:10:38Z
2020-11-19T00:22:36Z
2020-11-19T00:22:36Z
2020-11-19T00:27:24Z
CLN: Add comment about a possible alternative approach to the murmur2 hash
diff --git a/pandas/_libs/src/klib/khash_python.h b/pandas/_libs/src/klib/khash_python.h
index aebc229abddd2..c04fe1899649d 100644
--- a/pandas/_libs/src/klib/khash_python.h
+++ b/pandas/_libs/src/klib/khash_python.h
@@ -16,6 +16,11 @@
 // GH 13436 showed that _Py_HashDouble doesn't work well with khash
 // GH 28303 showed, that the simple xoring-version isn't good enough
 // See GH 36729 for evaluation of the currently used murmur2-hash version
+// An interesting alternative to the expensive murmur2 hash would be to change
+// the probing strategy to the one used by CPython's dict implementation,
+// which shines for smaller sizes but is more predisposed to superlinear
+// running times (see GH 36729 for comparison)
+
 khint64_t PANDAS_INLINE asint64(double key) {
     khint64_t val;
Small comment as requested here: https://github.com/pandas-dev/pandas/pull/36729#discussion_r522956690
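To make the suggested alternative concrete, here is a rough Python sketch of the probe sequence CPython's dict uses (following the scheme documented in CPython's ``dictobject.c``); it is illustrative only and not part of this patch:

```python
PERTURB_SHIFT = 5


def probe_slots(hash_value: int, mask: int):
    """Yield table slots in CPython-dict probe order (illustrative sketch)."""
    perturb = hash_value
    i = hash_value & mask
    while True:
        yield i
        # Mix the untouched high bits of the hash into the recurrence
        # i = 5*i + perturb + 1 (mod table size); once perturb reaches 0,
        # this degenerates to i = 5*i + 1, which still visits every slot.
        perturb >>= PERTURB_SHIFT
        i = (i * 5 + perturb + 1) & mask


# First few probe slots for an arbitrary hash in a 64-slot table
slots = probe_slots(0x9BF421, mask=63)
print([next(slots) for _ in range(4)])
```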
https://api.github.com/repos/pandas-dev/pandas/pulls/37876
2020-11-15T21:47:40Z
2020-11-15T23:12:27Z
2020-11-15T23:12:27Z
2020-11-15T23:12:32Z
BUG: reindex sometimes raises IndexingError when the DataFrame is empty
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 28f7df98cb86b..30d6095639a00 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -584,6 +584,7 @@ Indexing - Bug in :meth:`DataFrame.loc` returned requested key plus missing values when ``loc`` was applied to single level from :class:`MultiIndex` (:issue:`27104`) - Bug in indexing on a :class:`Series` or :class:`DataFrame` with a :class:`CategoricalIndex` using a listlike indexer containing NA values (:issue:`37722`) - Bug in :meth:`DataFrame.xs` ignored ``droplevel=False`` for columns (:issue:`19056`) +- Bug in :meth:`DataFrame.reindex` raising ``IndexingError`` wrongly for empty :class:`DataFrame` with ``tolerance`` not None or ``method="nearest"`` (:issue:`27315`) Missing ^^^^^^^ diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index cb5641a74e60b..30b582f854b57 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3163,7 +3163,7 @@ def _get_fill_indexer( indexer = engine_method(target_values, limit) else: indexer = self._get_fill_indexer_searchsorted(target, method, limit) - if tolerance is not None: + if tolerance is not None and len(self): indexer = self._filter_indexer_tolerance(target_values, indexer, tolerance) return indexer @@ -3208,6 +3208,9 @@ def _get_nearest_indexer(self, target: "Index", limit, tolerance) -> np.ndarray: values that can be subtracted from each other (e.g., not strings or tuples). """ + if not len(self): + return self._get_fill_indexer(target, "pad") + left_indexer = self.get_indexer(target, "pad", limit=limit) right_indexer = self.get_indexer(target, "backfill", limit=limit) diff --git a/pandas/tests/frame/methods/test_reindex.py b/pandas/tests/frame/methods/test_reindex.py index 113a80f1c5c4e..3e4e16955b44a 100644 --- a/pandas/tests/frame/methods/test_reindex.py +++ b/pandas/tests/frame/methods/test_reindex.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import datetime, timedelta import inspect from itertools import permutations @@ -874,3 +874,20 @@ def test_reindex_multiindex_ffill_added_rows(self): result = df.reindex(mi2, axis=0, method="ffill") expected = DataFrame([[0, 7], [3, 4], [3, 4]], index=mi2, columns=["x", "y"]) tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "kwargs", + [ + {"method": "pad", "tolerance": timedelta(seconds=9)}, + {"method": "backfill", "tolerance": timedelta(seconds=9)}, + {"method": "nearest"}, + {"method": None}, + ], + ) + def test_reindex_empty_frame(self, kwargs): + # GH#27315 + idx = date_range(start="2020", freq="30s", periods=3) + df = DataFrame([], index=Index([], name="time"), columns=["a"]) + result = df.reindex(idx, **kwargs) + expected = DataFrame({"a": [pd.NA] * 3}, index=idx) + tm.assert_frame_equal(result, expected)
- [x] closes #27315
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

Built on top of #27326
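A quick reproduction taken from the new test in this diff; each call previously raised ``IndexingError`` on the empty frame and now returns an all-NA frame aligned to ``idx``:

```python
from datetime import timedelta

import pandas as pd

idx = pd.date_range(start="2020", freq="30s", periods=3)
df = pd.DataFrame([], index=pd.Index([], name="time"), columns=["a"])

df.reindex(idx, method="pad", tolerance=timedelta(seconds=9))
df.reindex(idx, method="nearest")
df.reindex(idx)  # all four parametrized cases now return a frame of NA
```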
https://api.github.com/repos/pandas-dev/pandas/pulls/37874
2020-11-15T21:28:31Z
2020-11-17T01:26:21Z
2020-11-17T01:26:21Z
2020-11-17T08:35:37Z
BUG: __getitem__ raises blank KeyError for IntervalIndex and missing keys
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 27511c96faa5a..20ca67c0ad111 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -627,6 +627,7 @@ Indexing - Bug in :meth:`DataFrame.loc` raising ``TypeError`` when non-integer slice was given to select values from :class:`MultiIndex` (:issue:`25165`, :issue:`24263`) - Bug in :meth:`DataFrame.loc` returning and assigning elements in wrong order when indexer is differently ordered than the :class:`MultiIndex` to filter (:issue:`31330`, :issue:`34603`) - Bug in :meth:`DataFrame.loc` and :meth:`DataFrame.__getitem__` raising ``KeyError`` when columns were :class:`MultiIndex` with only one level (:issue:`29749`) +- Bug in :meth:`Series.__getitem__` and :meth:`DataFrame.__getitem__` raising blank ``KeyError`` without missing keys for :class:`IntervalIndex` (:issue:`27365`) Missing ^^^^^^^ diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 98752a21e44a2..986c4d2c59723 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -806,7 +806,7 @@ def _convert_list_indexer(self, keyarr): # we have missing values if (locs == -1).any(): - raise KeyError + raise KeyError(keyarr[locs == -1].tolist()) return locs diff --git a/pandas/tests/indexing/interval/test_interval.py b/pandas/tests/indexing/interval/test_interval.py index 0fa6abb27cb61..f4e7296598d54 100644 --- a/pandas/tests/indexing/interval/test_interval.py +++ b/pandas/tests/indexing/interval/test_interval.py @@ -65,10 +65,10 @@ def test_non_matching(self): # this is a departure from our current # indexing scheme, but simpler - with pytest.raises(KeyError, match="^$"): + with pytest.raises(KeyError, match=r"^\[-1\]$"): s.loc[[-1, 3, 4, 5]] - with pytest.raises(KeyError, match="^$"): + with pytest.raises(KeyError, match=r"^\[-1\]$"): s.loc[[-1, 3]] @pytest.mark.arm_slow @@ -107,11 +107,11 @@ def test_loc_getitem_frame(self): expected = df.take([4, 5, 4, 5]) tm.assert_frame_equal(result, expected) - with pytest.raises(KeyError, match="^$"): + with pytest.raises(KeyError, match=r"^\[10\]$"): df.loc[[10]] # partial missing - with pytest.raises(KeyError, match="^$"): + with pytest.raises(KeyError, match=r"^\[10\]$"): df.loc[[10, 4]] diff --git a/pandas/tests/indexing/interval/test_interval_new.py b/pandas/tests/indexing/interval/test_interval_new.py index 03c3034772bc6..a9512bc97d9de 100644 --- a/pandas/tests/indexing/interval/test_interval_new.py +++ b/pandas/tests/indexing/interval/test_interval_new.py @@ -204,13 +204,13 @@ def test_loc_with_overlap(self): with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='right')")): s.loc[Interval(3, 5)] - with pytest.raises(KeyError, match="^$"): + with pytest.raises(KeyError, match=r"^\[Interval\(3, 5, closed='right'\)\]$"): s.loc[[Interval(3, 5)]] with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='right')")): s[Interval(3, 5)] - with pytest.raises(KeyError, match="^$"): + with pytest.raises(KeyError, match=r"^\[Interval\(3, 5, closed='right'\)\]$"): s[[Interval(3, 5)]] # slices with interval (only exact matches) @@ -266,3 +266,11 @@ def test_non_unique_moar(self): expected = s.iloc[[0, 1]] result = s[[Interval(1, 3)]] tm.assert_series_equal(expected, result) + + def test_missing_key_error_message(self, frame_or_series): + # GH#27365 + obj = frame_or_series( + np.arange(5), index=IntervalIndex.from_breaks(np.arange(6)) + ) + with pytest.raises(KeyError, match=r"\[6\]"): + obj.loc[[4, 5, 6]]
- [x] closes #27365
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

Without ``tolist``, the error would look like ``KeyError: array([6])``.
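For reference, a minimal reproduction built from the new test, showing the improved message:

```python
import numpy as np

import pandas as pd

ser = pd.Series(np.arange(5), index=pd.IntervalIndex.from_breaks(np.arange(6)))
try:
    ser.loc[[4, 5, 6]]  # 6 is not covered by any interval
except KeyError as err:
    print(err)  # "[6]" rather than a blank message
```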
https://api.github.com/repos/pandas-dev/pandas/pulls/37873
2020-11-15T20:46:31Z
2020-11-26T00:40:36Z
2020-11-26T00:40:36Z
2020-11-26T21:45:48Z