title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
CI: Revert disable of some window test on Windows
diff --git a/ci/run_tests.sh b/ci/run_tests.sh index 261d6364cb5e1..0d6f26d8c29f8 100755 --- a/ci/run_tests.sh +++ b/ci/run_tests.sh @@ -24,7 +24,7 @@ PYTEST_CMD="${XVFB}pytest -m \"$PATTERN\" -n $PYTEST_WORKERS --dist=loadfile $TE if [[ $(uname) != "Linux" && $(uname) != "Darwin" ]]; then # GH#37455 windows py38 build appears to be running out of memory # skip collection of window tests - PYTEST_CMD="$PYTEST_CMD --ignore=pandas/tests/window/ --ignore=pandas/tests/plotting/" + PYTEST_CMD="$PYTEST_CMD --ignore=pandas/tests/window/moments --ignore=pandas/tests/plotting/" fi echo $PYTEST_CMD
- [x] xref #37535 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/41481
2021-05-15T03:18:46Z
2021-05-17T15:18:37Z
2021-05-17T15:18:37Z
2021-05-17T16:16:35Z
DEPR: dropping nuisance columns in DataFrame reductions
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 6dd011c588702..36b591c3c3142 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -381,6 +381,7 @@ this pathological behavior (:issue:`37827`): *New behavior*: .. ipython:: python + :okwarning: df.mean() @@ -394,6 +395,7 @@ instead of casting to a NumPy array which may have different semantics (:issue:` :issue:`28949`, :issue:`21020`). .. ipython:: python + :okwarning: ser = pd.Series([0, 1], dtype="category", name="A") df = ser.to_frame() @@ -411,6 +413,7 @@ instead of casting to a NumPy array which may have different semantics (:issue:` *New behavior*: .. ipython:: python + :okwarning: df.any() diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index c26f8288f59ab..5b92fbb730b35 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -678,6 +678,47 @@ Deprecations - Deprecated passing arguments as positional (except for ``"method"``) in :meth:`DataFrame.interpolate` and :meth:`Series.interpolate` (:issue:`41485`) - Deprecated passing arguments (apart from ``value``) as positional in :meth:`DataFrame.fillna` and :meth:`Series.fillna` (:issue:`41485`) +.. _whatsnew_130.deprecations.nuisance_columns: + +Deprecated Dropping Nuisance Columns in DataFrame Reductions and DataFrameGroupBy Operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The default of calling a reduction (.min, .max, .sum, ...) on a :class:`DataFrame` with +``numeric_only=None`` (the default, columns on which the reduction raises ``TypeError`` +are silently ignored and dropped from the result. + +This behavior is deprecated. In a future version, the ``TypeError`` will be raised, +and users will need to select only valid columns before calling the function. + +For example: + +.. 
ipython:: python + + df = pd.DataFrame({"A": [1, 2, 3, 4], "B": pd.date_range("2016-01-01", periods=4)}) + df + +*Old behavior*: + +.. code-block:: ipython + + In [3]: df.prod() + Out[3]: + Out[3]: + A 24 + dtype: int64 + +*Future behavior*: + +.. code-block:: ipython + + In [4]: df.prod() + ... + TypeError: 'DatetimeArray' does not implement reduction 'prod' + + In [5]: df[["A"]].prod() + Out[5]: + A 24 + dtype: int64 + .. --------------------------------------------------------------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index e55b3984b1c39..18ee1ad9bcd96 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9854,6 +9854,21 @@ def _get_data() -> DataFrame: # Even if we are object dtype, follow numpy and return # float64, see test_apply_funcs_over_empty out = out.astype(np.float64) + + if numeric_only is None and out.shape[0] != df.shape[1]: + # columns have been dropped GH#41480 + arg_name = "numeric_only" + if name in ["all", "any"]: + arg_name = "bool_only" + warnings.warn( + "Dropping of nuisance columns in DataFrame reductions " + f"(with '{arg_name}=None') is deprecated; in a future " + "version this will raise TypeError. Select only valid " + "columns before calling the reduction.", + FutureWarning, + stacklevel=5, + ) + return out assert numeric_only is None @@ -9874,6 +9889,19 @@ def _get_data() -> DataFrame: with np.errstate(all="ignore"): result = func(values) + # columns have been dropped GH#41480 + arg_name = "numeric_only" + if name in ["all", "any"]: + arg_name = "bool_only" + warnings.warn( + "Dropping of nuisance columns in DataFrame reductions " + f"(with '{arg_name}=None') is deprecated; in a future " + "version this will raise TypeError. 
Select only valid " + "columns before calling the reduction.", + FutureWarning, + stacklevel=5, + ) + if hasattr(result, "dtype"): if filter_type == "bool" and notna(result).all(): result = result.astype(np.bool_) diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py index cc91cdae942fd..2511f6fc2563c 100644 --- a/pandas/tests/apply/test_frame_apply.py +++ b/pandas/tests/apply/test_frame_apply.py @@ -1209,7 +1209,10 @@ def test_nuiscance_columns(): ) tm.assert_frame_equal(result, expected) - result = df.agg("sum") + with tm.assert_produces_warning( + FutureWarning, match="Select only valid", check_stacklevel=False + ): + result = df.agg("sum") expected = Series([6, 6.0, "foobarbaz"], index=["A", "B", "C"]) tm.assert_series_equal(result, expected) @@ -1426,8 +1429,9 @@ def test_apply_datetime_tz_issue(): @pytest.mark.parametrize("method", ["min", "max", "sum"]) def test_consistency_of_aggregates_of_columns_with_missing_values(df, method): # GH 16832 - none_in_first_column_result = getattr(df[["A", "B"]], method)() - none_in_second_column_result = getattr(df[["B", "A"]], method)() + with tm.assert_produces_warning(FutureWarning, match="Select only valid"): + none_in_first_column_result = getattr(df[["A", "B"]], method)() + none_in_second_column_result = getattr(df[["B", "A"]], method)() tm.assert_series_equal(none_in_first_column_result, none_in_second_column_result) diff --git a/pandas/tests/apply/test_invalid_arg.py b/pandas/tests/apply/test_invalid_arg.py index 698f85a04a757..83a1baa9d13d6 100644 --- a/pandas/tests/apply/test_invalid_arg.py +++ b/pandas/tests/apply/test_invalid_arg.py @@ -342,6 +342,7 @@ def test_transform_wont_agg_series(string_series, func): @pytest.mark.parametrize( "op_wrapper", [lambda x: x, lambda x: [x], lambda x: {"A": x}, lambda x: {"A": [x]}] ) +@pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning") def test_transform_reducer_raises(all_reductions, frame_or_series, op_wrapper): # GH 
35964 op = op_wrapper(all_reductions) diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py index aca061cdd197b..c01195a6afff1 100644 --- a/pandas/tests/frame/methods/test_quantile.py +++ b/pandas/tests/frame/methods/test_quantile.py @@ -56,7 +56,8 @@ def test_quantile(self, datetime_frame): # non-numeric exclusion df = DataFrame({"col1": ["A", "A", "B", "B"], "col2": [1, 2, 3, 4]}) rs = df.quantile(0.5) - xp = df.median().rename(0.5) + with tm.assert_produces_warning(FutureWarning, match="Select only valid"): + xp = df.median().rename(0.5) tm.assert_series_equal(rs, xp) # axis diff --git a/pandas/tests/frame/methods/test_rank.py b/pandas/tests/frame/methods/test_rank.py index 6538eda8cdeff..5ba4ab4408f11 100644 --- a/pandas/tests/frame/methods/test_rank.py +++ b/pandas/tests/frame/methods/test_rank.py @@ -248,6 +248,7 @@ def test_rank_methods_frame(self): @td.skip_array_manager_not_yet_implemented @pytest.mark.parametrize("dtype", ["O", "f8", "i8"]) + @pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning") def test_rank_descending(self, method, dtype): if "i" in dtype: diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index b9f6e72acf71b..7fe921571ee2e 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1021,6 +1021,7 @@ def test_zero_len_frame_with_series_corner_cases(): tm.assert_frame_equal(result, expected) +@pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning") def test_frame_single_columns_object_sum_axis_1(): # GH 13758 data = { diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 0ca523db60889..564f5d20b0301 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -8,6 +8,8 @@ from pandas.compat import is_platform_windows import pandas.util._test_decorators as td +from pandas.core.dtypes.common import 
is_categorical_dtype + import pandas as pd from pandas import ( Categorical, @@ -90,7 +92,7 @@ def wrapper(x): tm.assert_series_equal( result0, frame.apply(wrapper), check_dtype=check_dtype, rtol=rtol, atol=atol ) - # HACK: win32 + # FIXME: HACK: win32 tm.assert_series_equal( result1, frame.apply(wrapper, axis=1), @@ -140,7 +142,7 @@ def wrapper(x): tm.assert_series_equal(r1, expected) -def assert_stat_op_api(opname, float_frame, float_string_frame, has_numeric_only=False): +def assert_stat_op_api(opname, float_frame, float_string_frame, has_numeric_only=True): """ Check that API for operator opname works as advertised on frame @@ -199,7 +201,7 @@ def wrapper(x): tm.assert_series_equal(result0, frame.apply(wrapper)) tm.assert_series_equal( result1, frame.apply(wrapper, axis=1), check_dtype=False - ) # HACK: win32 + ) # FIXME: HACK: win32 else: skipna_wrapper = alternative wrapper = alternative @@ -249,6 +251,7 @@ def assert_bool_op_api( # make sure op works on mixed-type frame mixed = float_string_frame mixed["_bool_"] = np.random.randn(len(mixed)) > 0.5 + getattr(mixed, opname)(axis=0) getattr(mixed, opname)(axis=1) @@ -264,21 +267,22 @@ class TestDataFrameAnalytics: # --------------------------------------------------------------------- # Reductions + @pytest.mark.filterwarnings("ignore:Dropping of nuisance:FutureWarning") def test_stat_op_api(self, float_frame, float_string_frame): + assert_stat_op_api("count", float_frame, float_string_frame) + assert_stat_op_api("sum", float_frame, float_string_frame) + assert_stat_op_api( - "count", float_frame, float_string_frame, has_numeric_only=True - ) - assert_stat_op_api( - "sum", float_frame, float_string_frame, has_numeric_only=True + "nunique", float_frame, float_string_frame, has_numeric_only=False ) - - assert_stat_op_api("nunique", float_frame, float_string_frame) assert_stat_op_api("mean", float_frame, float_string_frame) assert_stat_op_api("product", float_frame, float_string_frame) assert_stat_op_api("median", 
float_frame, float_string_frame) assert_stat_op_api("min", float_frame, float_string_frame) assert_stat_op_api("max", float_frame, float_string_frame) - assert_stat_op_api("mad", float_frame, float_string_frame) + assert_stat_op_api( + "mad", float_frame, float_string_frame, has_numeric_only=False + ) assert_stat_op_api("var", float_frame, float_string_frame) assert_stat_op_api("std", float_frame, float_string_frame) assert_stat_op_api("sem", float_frame, float_string_frame) @@ -435,12 +439,17 @@ def test_mixed_ops(self, op): "str": ["a", "b", "c", "d"], } ) - - result = getattr(df, op)() + with tm.assert_produces_warning( + FutureWarning, match="Select only valid columns" + ): + result = getattr(df, op)() assert len(result) == 2 with pd.option_context("use_bottleneck", False): - result = getattr(df, op)() + with tm.assert_produces_warning( + FutureWarning, match="Select only valid columns" + ): + result = getattr(df, op)() assert len(result) == 2 def test_reduce_mixed_frame(self): @@ -457,7 +466,8 @@ def test_reduce_mixed_frame(self): tm.assert_numpy_array_equal( test.values, np.array([2, 150, "abcde"], dtype=object) ) - tm.assert_series_equal(test, df.T.sum(axis=1)) + alt = df.T.sum(axis=1) + tm.assert_series_equal(test, alt) def test_nunique(self): df = DataFrame({"A": [1, 1, 1], "B": [1, 2, 3], "C": [1, np.nan, 3]}) @@ -510,7 +520,10 @@ def test_mean_mixed_string_decimal(self): df = DataFrame(d) - result = df.mean() + with tm.assert_produces_warning( + FutureWarning, match="Select only valid columns" + ): + result = df.mean() expected = Series([2.7, 681.6], index=["A", "C"]) tm.assert_series_equal(result, expected) @@ -740,7 +753,8 @@ def test_operators_timedelta64(self): tm.assert_series_equal(result, expected) # excludes numeric - result = mixed.min(axis=1) + with tm.assert_produces_warning(FutureWarning, match="Select only valid"): + result = mixed.min(axis=1) expected = Series([1, 1, 1.0], index=[0, 1, 2]) tm.assert_series_equal(result, expected) @@ -801,8 
+815,9 @@ def test_sum_prod_nanops(self, method, unit): idx = ["a", "b", "c"] df = DataFrame({"a": [unit, unit], "b": [unit, np.nan], "c": [np.nan, np.nan]}) # The default - result = getattr(df, method) + result = getattr(df, method)() expected = Series([unit, unit, unit], index=idx, dtype="float64") + tm.assert_series_equal(result, expected) # min_count=1 result = getattr(df, method)(min_count=1) @@ -873,20 +888,23 @@ def test_sum_mixed_datetime(self): df = DataFrame({"A": date_range("2000", periods=4), "B": [1, 2, 3, 4]}).reindex( [2, 3, 4] ) - result = df.sum() + with tm.assert_produces_warning(FutureWarning, match="Select only valid"): + result = df.sum() expected = Series({"B": 7.0}) tm.assert_series_equal(result, expected) def test_mean_corner(self, float_frame, float_string_frame): # unit test when have object data - the_mean = float_string_frame.mean(axis=0) + with tm.assert_produces_warning(FutureWarning, match="Select only valid"): + the_mean = float_string_frame.mean(axis=0) the_sum = float_string_frame.sum(axis=0, numeric_only=True) tm.assert_index_equal(the_sum.index, the_mean.index) assert len(the_mean.index) < len(float_string_frame.columns) # xs sum mixed type, just want to know it works... 
- the_mean = float_string_frame.mean(axis=1) + with tm.assert_produces_warning(FutureWarning, match="Select only valid"): + the_mean = float_string_frame.mean(axis=1) the_sum = float_string_frame.sum(axis=1, numeric_only=True) tm.assert_index_equal(the_sum.index, the_mean.index) @@ -947,10 +965,13 @@ def test_mean_extensionarray_numeric_only_true(self): def test_stats_mixed_type(self, float_string_frame): # don't blow up - float_string_frame.std(1) - float_string_frame.var(1) - float_string_frame.mean(1) - float_string_frame.skew(1) + with tm.assert_produces_warning( + FutureWarning, match="Select only valid columns" + ): + float_string_frame.std(1) + float_string_frame.var(1) + float_string_frame.mean(1) + float_string_frame.skew(1) def test_sum_bools(self): df = DataFrame(index=range(1), columns=range(10)) @@ -1125,7 +1146,6 @@ def test_any_all_object_dtype(self, axis, bool_agg_func, skipna): [np.nan, np.nan, "5", np.nan], ] ) - result = getattr(df, bool_agg_func)(axis=axis, skipna=skipna) expected = Series([True, True, True, True]) tm.assert_series_equal(result, expected) @@ -1224,12 +1244,23 @@ def test_any_all_bool_only(self): def test_any_all_np_func(self, func, data, expected): # GH 19976 data = DataFrame(data) - result = func(data) + + warn = None + if any(is_categorical_dtype(x) for x in data.dtypes): + warn = FutureWarning + + with tm.assert_produces_warning( + warn, match="Select only valid columns", check_stacklevel=False + ): + result = func(data) assert isinstance(result, np.bool_) assert result.item() is expected # method version - result = getattr(DataFrame(data), func.__name__)(axis=None) + with tm.assert_produces_warning( + warn, match="Select only valid columns", check_stacklevel=False + ): + result = getattr(DataFrame(data), func.__name__)(axis=None) assert isinstance(result, np.bool_) assert result.item() is expected @@ -1349,7 +1380,6 @@ def test_min_max_dt64_with_NaT_skipna_false(self, request, tz_naive_fixture): "b": [Timestamp("2020-02-01 
08:00:00", tz=tz), pd.NaT], } ) - res = df.min(axis=1, skipna=False) expected = Series([df.loc[0, "a"], pd.NaT]) assert expected.dtype == df["a"].dtype @@ -1411,12 +1441,12 @@ def test_frame_any_all_with_level(self): ], ) - with tm.assert_produces_warning(FutureWarning): + with tm.assert_produces_warning(FutureWarning, match="Using the level"): result = df.any(level=0) ex = DataFrame({"data": [False, True]}, index=["one", "two"]) tm.assert_frame_equal(result, ex) - with tm.assert_produces_warning(FutureWarning): + with tm.assert_produces_warning(FutureWarning, match="Using the level"): result = df.all(level=0) ex = DataFrame({"data": [False, False]}, index=["one", "two"]) tm.assert_frame_equal(result, ex) @@ -1463,7 +1493,7 @@ def test_reductions_deprecation_level_argument(self, frame_or_series, func): obj = frame_or_series( [1, 2, 3], index=MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]]) ) - with tm.assert_produces_warning(FutureWarning): + with tm.assert_produces_warning(FutureWarning, match="level"): getattr(obj, func)(level=0) @@ -1486,11 +1516,17 @@ def test_any_all_categorical_dtype_nuisance_column(self, method): # With bool_only=None, operating on this column raises and is ignored, # so we expect an empty result. 
- result = getattr(df, method)(bool_only=None) + with tm.assert_produces_warning( + FutureWarning, match="Select only valid columns" + ): + result = getattr(df, method)(bool_only=None) expected = Series([], index=Index([]), dtype=bool) tm.assert_series_equal(result, expected) - result = getattr(np, method)(df, axis=0) + with tm.assert_produces_warning( + FutureWarning, match="Select only valid columns", check_stacklevel=False + ): + result = getattr(np, method)(df, axis=0) tm.assert_series_equal(result, expected) def test_median_categorical_dtype_nuisance_column(self): @@ -1505,7 +1541,10 @@ def test_median_categorical_dtype_nuisance_column(self): with pytest.raises(TypeError, match="does not implement reduction"): df.median(numeric_only=False) - result = df.median() + with tm.assert_produces_warning( + FutureWarning, match="Select only valid columns" + ): + result = df.median() expected = Series([], index=Index([]), dtype=np.float64) tm.assert_series_equal(result, expected) @@ -1515,7 +1554,10 @@ def test_median_categorical_dtype_nuisance_column(self): with pytest.raises(TypeError, match="does not implement reduction"): df.median(numeric_only=False) - result = df.median() + with tm.assert_produces_warning( + FutureWarning, match="Select only valid columns" + ): + result = df.median() expected = Series([2.0], index=["B"]) tm.assert_series_equal(result, expected) @@ -1539,23 +1581,35 @@ def test_min_max_categorical_dtype_non_ordered_nuisance_column(self, method): with pytest.raises(TypeError, match="is not ordered for operation"): getattr(df, method)(numeric_only=False) - result = getattr(df, method)() + with tm.assert_produces_warning( + FutureWarning, match="Select only valid columns" + ): + result = getattr(df, method)() expected = Series([], index=Index([]), dtype=np.float64) tm.assert_series_equal(result, expected) - result = getattr(np, method)(df) + with tm.assert_produces_warning( + FutureWarning, match="Select only valid columns", check_stacklevel=False + 
): + result = getattr(np, method)(df) tm.assert_series_equal(result, expected) # same thing, but with an additional non-categorical column df["B"] = df["A"].astype(object) - result = getattr(df, method)() + with tm.assert_produces_warning( + FutureWarning, match="Select only valid columns" + ): + result = getattr(df, method)() if method == "min": expected = Series(["a"], index=["B"]) else: expected = Series(["c"], index=["B"]) tm.assert_series_equal(result, expected) - result = getattr(np, method)(df) + with tm.assert_produces_warning( + FutureWarning, match="Select only valid columns", check_stacklevel=False + ): + result = getattr(np, method)(df) tm.assert_series_equal(result, expected) def test_reduction_object_block_splits_nuisance_columns(self): @@ -1563,14 +1617,20 @@ def test_reduction_object_block_splits_nuisance_columns(self): df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", "c"]}, dtype=object) # We should only exclude "B", not "A" - result = df.mean() + with tm.assert_produces_warning( + FutureWarning, match="Select only valid columns" + ): + result = df.mean() expected = Series([1.0], index=["A"]) tm.assert_series_equal(result, expected) # Same behavior but heterogeneous dtype df["C"] = df["A"].astype(int) + 4 - result = df.mean() + with tm.assert_produces_warning( + FutureWarning, match="Select only valid columns" + ): + result = df.mean() expected = Series([1.0, 5.0], index=["A", "C"]) tm.assert_series_equal(result, expected) @@ -1644,6 +1704,7 @@ def test_groupy_regular_arithmetic_equivalent(meth): def test_frame_mixed_numeric_object_with_timestamp(ts_value): # GH 13912 df = DataFrame({"a": [1], "b": [1.1], "c": ["foo"], "d": [ts_value]}) - result = df.sum() + with tm.assert_produces_warning(FutureWarning, match="Dropping of nuisance"): + result = df.sum() expected = Series([1, 1.1, "foo"], index=list("abc")) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py index 
3214290465832..42474ff00ad6d 100644 --- a/pandas/tests/frame/test_subclass.py +++ b/pandas/tests/frame/test_subclass.py @@ -567,6 +567,7 @@ def stretch(row): assert not isinstance(result, tm.SubclassedDataFrame) tm.assert_series_equal(result, expected) + @pytest.mark.filterwarnings("ignore:.*None will no longer:FutureWarning") def test_subclassed_reductions(self, all_reductions): # GH 25596 diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index 2f87f4a19b93f..cf4127da79bf9 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -1003,7 +1003,8 @@ def test_apply_function_with_indexing_return_column(): "foo2": [1, 2, 4, 4, 5, 6], } ) - result = df.groupby("foo1", as_index=False).apply(lambda x: x.mean()) + with tm.assert_produces_warning(FutureWarning, match="Select only valid"): + result = df.groupby("foo1", as_index=False).apply(lambda x: x.mean()) expected = DataFrame({"foo1": ["one", "three", "two"], "foo2": [3.0, 4.0, 4.0]}) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 217aa0d7ede17..8ce7841bcc2c2 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -282,7 +282,10 @@ def test_apply(ordered): # GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]]) # is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"]) # when we expect Series(0., index=["values"]) - result = grouped.apply(lambda x: np.mean(x)) + with tm.assert_produces_warning( + FutureWarning, match="Select only valid", check_stacklevel=False + ): + result = grouped.apply(lambda x: np.mean(x)) tm.assert_frame_equal(result, expected) result = grouped.mean() @@ -1289,6 +1292,7 @@ def test_groupby_categorical_axis_1(code): tm.assert_frame_equal(result, expected) +@pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning") def 
test_groupby_cat_preserves_structure(observed, ordered): # GH 28787 df = DataFrame( diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 3f43c34b6eb34..4fa21a259e7cb 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -333,6 +333,7 @@ def gni(self, df): return gni # TODO: non-unique columns, as_index=False + @pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning") def test_idxmax(self, gb): # object dtype so idxmax goes through _aggregate_item_by_item # GH#5610 @@ -342,6 +343,7 @@ def test_idxmax(self, gb): result = gb.idxmax() tm.assert_frame_equal(result, expected) + @pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning") def test_idxmin(self, gb): # object dtype so idxmax goes through _aggregate_item_by_item # GH#5610 @@ -524,6 +526,7 @@ def test_groupby_non_arithmetic_agg_int_like_precision(i): ("idxmax", {"c_int": [1, 3], "c_float": [0, 2], "c_date": [0, 3]}), ], ) +@pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning") def test_idxmin_idxmax_returns_int_types(func, values): # GH 25444 df = DataFrame( diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index afa28866f0833..c37dc17b85dd2 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1757,6 +1757,7 @@ def test_pivot_table_values_key_error(): @pytest.mark.parametrize( "op", ["idxmax", "idxmin", "mad", "min", "max", "sum", "prod", "skew"] ) +@pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning") def test_empty_groupby(columns, keys, values, method, op, request): # GH8093 & GH26411 override_dtype = None
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry Discussed on this week's call
https://api.github.com/repos/pandas-dev/pandas/pulls/41480
2021-05-15T01:37:10Z
2021-05-21T20:59:56Z
2021-05-21T20:59:56Z
2021-05-21T21:18:11Z
REF: Various things preparing instantiable NumericIndex
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 83946491f32a8..08ca84228a301 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -347,7 +347,7 @@ def _outer_indexer( joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx - _typ = "index" + _typ: str = "index" _data: ExtensionArray | np.ndarray _id: object | None = None _name: Hashable = None @@ -355,11 +355,11 @@ def _outer_indexer( # don't allow this anymore, and raise if it happens rather than # failing silently. _no_setting_name: bool = False - _comparables = ["name"] - _attributes = ["name"] - _is_numeric_dtype = False - _can_hold_na = True - _can_hold_strings = True + _comparables: list[str] = ["name"] + _attributes: list[str] = ["name"] + _is_numeric_dtype: bool = False + _can_hold_na: bool = True + _can_hold_strings: bool = True # would we like our indexing holder to defer to us _defer_to_indexing = False @@ -5465,7 +5465,7 @@ def map(self, mapper, na_action=None): """ from pandas.core.indexes.multi import MultiIndex - new_values = super()._map_values(mapper, na_action=na_action) + new_values = self._map_values(mapper, na_action=na_action) attributes = self._get_attributes_dict() diff --git a/pandas/tests/indexes/numeric/test_numeric.py b/pandas/tests/indexes/numeric/test_numeric.py index 4c2c38df601ce..c796a25faf0a6 100644 --- a/pandas/tests/indexes/numeric/test_numeric.py +++ b/pandas/tests/indexes/numeric/test_numeric.py @@ -18,12 +18,12 @@ class TestFloat64Index(NumericBase): _index_cls = Float64Index - @pytest.fixture + @pytest.fixture(params=[np.float64]) def dtype(self, request): - return np.float64 + return request.param @pytest.fixture( - params=["int64", "uint64", "category", "datetime64"], + params=["int64", "uint64", "category", "datetime64", "object"], ) def invalid_dtype(self, request): return request.param @@ -42,16 +42,16 @@ def simple_index(self, dtype): ], ids=["mixed", "float", "mixed_dec", "float_dec"], ) - def 
index(self, request): - return self._index_cls(request.param) + def index(self, request, dtype): + return self._index_cls(request.param, dtype=dtype) @pytest.fixture - def mixed_index(self): - return self._index_cls([1.5, 2, 3, 4, 5]) + def mixed_index(self, dtype): + return self._index_cls([1.5, 2, 3, 4, 5], dtype=dtype) @pytest.fixture - def float_index(self): - return self._index_cls([0.0, 2.5, 5.0, 7.5, 10.0]) + def float_index(self, dtype): + return self._index_cls([0.0, 2.5, 5.0, 7.5, 10.0], dtype=dtype) def test_repr_roundtrip(self, index): tm.assert_index_equal(eval(repr(index)), index) @@ -72,22 +72,23 @@ def test_constructor(self, dtype): index_cls = self._index_cls # explicit construction - index = index_cls([1, 2, 3, 4, 5]) + index = index_cls([1, 2, 3, 4, 5], dtype=dtype) assert isinstance(index, index_cls) - assert index.dtype.type is dtype + assert index.dtype == dtype expected = np.array([1, 2, 3, 4, 5], dtype=dtype) tm.assert_numpy_array_equal(index.values, expected) - index = index_cls(np.array([1, 2, 3, 4, 5])) + + index = index_cls(np.array([1, 2, 3, 4, 5]), dtype=dtype) assert isinstance(index, index_cls) assert index.dtype == dtype - index = index_cls([1.0, 2, 3, 4, 5]) + index = index_cls([1.0, 2, 3, 4, 5], dtype=dtype) assert isinstance(index, index_cls) assert index.dtype == dtype - index = index_cls(np.array([1.0, 2, 3, 4, 5])) + index = index_cls(np.array([1.0, 2, 3, 4, 5]), dtype=dtype) assert isinstance(index, index_cls) assert index.dtype == dtype @@ -100,13 +101,13 @@ def test_constructor(self, dtype): assert index.dtype == dtype # nan handling - result = index_cls([np.nan, np.nan]) + result = index_cls([np.nan, np.nan], dtype=dtype) assert pd.isna(result.values).all() - result = index_cls(np.array([np.nan])) + result = index_cls(np.array([np.nan]), dtype=dtype) assert pd.isna(result.values).all() - result = Index(np.array([np.nan])) + result = Index(np.array([np.nan], dtype=dtype)) assert isinstance(result, index_cls) assert 
result.dtype == dtype assert pd.isna(result.values).all() @@ -281,7 +282,7 @@ class NumericInt(NumericBase): def test_view(self, dtype): index_cls = self._index_cls - idx = index_cls([], name="Foo") + idx = index_cls([], dtype=dtype, name="Foo") idx_view = idx.view() assert idx_view.name == "Foo" @@ -382,12 +383,12 @@ def test_prevent_casting(self, simple_index): class TestInt64Index(NumericInt): _index_cls = Int64Index - @pytest.fixture - def dtype(self): - return np.int64 + @pytest.fixture(params=[np.int64]) + def dtype(self, request): + return request.param @pytest.fixture( - params=["uint64", "float64", "category", "datetime64"], + params=["uint64", "float64", "category", "datetime64", "object"], ) def invalid_dtype(self, request): return request.param @@ -399,14 +400,14 @@ def simple_index(self, dtype): @pytest.fixture( params=[range(0, 20, 2), range(19, -1, -1)], ids=["index_inc", "index_dec"] ) - def index(self, request): - return self._index_cls(request.param) + def index(self, request, dtype): + return self._index_cls(request.param, dtype=dtype) def test_constructor(self, dtype): index_cls = self._index_cls # pass list, coerce fine - index = index_cls([-5, 0, 1, 2]) + index = index_cls([-5, 0, 1, 2], dtype=dtype) expected = Index([-5, 0, 1, 2], dtype=dtype) tm.assert_index_equal(index, expected) @@ -486,7 +487,7 @@ def dtype(self): return np.uint64 @pytest.fixture( - params=["int64", "float64", "category", "datetime64"], + params=["int64", "float64", "category", "datetime64", "object"], ) def invalid_dtype(self, request): return request.param diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py index d9b093cc97fda..e80868fb08a09 100644 --- a/pandas/tests/indexes/ranges/test_range.py +++ b/pandas/tests/indexes/ranges/test_range.py @@ -28,7 +28,7 @@ def dtype(self): return np.int64 @pytest.fixture( - params=["uint64", "float64", "category", "datetime64"], + params=["uint64", "float64", "category", "datetime64", 
"object"], ) def invalid_dtype(self, request): return request.param
Separating various stuff from #41153, making that PR easier to review after this is merged, Similar idea as #41472. Nothing in this PR changes anything, but just makes it e.g. simpler to update test fixtures after this is merged.
https://api.github.com/repos/pandas-dev/pandas/pulls/41479
2021-05-14T23:13:21Z
2021-05-17T15:19:46Z
2021-05-17T15:19:46Z
2021-05-17T16:09:49Z
Backport PR #41462: CI: Fix changed flake8 error message after upgrade (#41462)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 31c926233d5b6..ffe615a63b7e3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,7 +4,7 @@ repos: hooks: - id: black - repo: https://gitlab.com/pycqa/flake8 - rev: 3.8.4 + rev: 3.9.2 hooks: - id: flake8 additional_dependencies: [flake8-comprehensions>=3.1.0] diff --git a/environment.yml b/environment.yml index 15c1611169427..72826124bc35d 100644 --- a/environment.yml +++ b/environment.yml @@ -20,7 +20,7 @@ dependencies: # code checks - black=20.8b1 - cpplint - - flake8 + - flake8=3.9.2 - flake8-comprehensions>=3.1.0 # used by flake8, linting of unnecessary comprehensions - isort>=5.2.1 # check that imports are in the right order - mypy=0.782 diff --git a/requirements-dev.txt b/requirements-dev.txt index f026fd421f937..5a64156fe997f 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -8,7 +8,7 @@ asv cython>=0.29.21 black==20.8b1 cpplint -flake8 +flake8==3.9.2 flake8-comprehensions>=3.1.0 isort>=5.2.1 mypy==0.782 diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py index 7e4c68ddc183b..cbf3e84044d53 100644 --- a/scripts/tests/test_validate_docstrings.py +++ b/scripts/tests/test_validate_docstrings.py @@ -165,7 +165,7 @@ def test_bad_class(self, capsys): "indentation_is_not_a_multiple_of_four", # with flake8 3.9.0, the message ends with four spaces, # whereas in earlier versions, it ended with "four" - ("flake8 error: E111 indentation is not a multiple of ",), + ("flake8 error: E111 indentation is not a multiple of 4",), ), ( "BadDocstrings",
cc @jreback We could avoid the version changes in the pre commits if you prefer
https://api.github.com/repos/pandas-dev/pandas/pulls/41478
2021-05-14T20:25:23Z
2021-05-24T10:46:22Z
2021-05-24T10:46:22Z
2021-11-13T19:32:56Z
DEPS: bump pyarrow version to 0.17.0 #38870
diff --git a/.github/workflows/database.yml b/.github/workflows/database.yml index a5aef7825c770..69f2e689c0228 100644 --- a/.github/workflows/database.yml +++ b/.github/workflows/database.yml @@ -70,7 +70,7 @@ jobs: - uses: conda-incubator/setup-miniconda@v2 with: activate-environment: pandas-dev - channel-priority: strict + channel-priority: flexible environment-file: ${{ matrix.ENV_FILE }} use-only-tar-bz2: true diff --git a/ci/deps/actions-37-db-min.yaml b/ci/deps/actions-37-db-min.yaml index 1d3794576220a..65c4c5769b1a3 100644 --- a/ci/deps/actions-37-db-min.yaml +++ b/ci/deps/actions-37-db-min.yaml @@ -31,7 +31,8 @@ dependencies: - openpyxl - pandas-gbq - google-cloud-bigquery>=1.27.2 # GH 36436 - - pyarrow=0.17 # GH 38803 + - protobuf>=3.12.4 + - pyarrow=0.17.1 # GH 38803 - pytables>=3.5.1 - scipy - xarray=0.12.3 diff --git a/ci/deps/actions-37-db.yaml b/ci/deps/actions-37-db.yaml index 8755e1a02c3cf..fa58f412cebf4 100644 --- a/ci/deps/actions-37-db.yaml +++ b/ci/deps/actions-37-db.yaml @@ -31,7 +31,7 @@ dependencies: - pandas-gbq - google-cloud-bigquery>=1.27.2 # GH 36436 - psycopg2 - - pyarrow>=0.15.0 + - pyarrow>=0.17.0 - pymysql - pytables - python-snappy diff --git a/ci/deps/actions-37-minimum_versions.yaml b/ci/deps/actions-37-minimum_versions.yaml index 3237cf9770220..aa5284e4f35d1 100644 --- a/ci/deps/actions-37-minimum_versions.yaml +++ b/ci/deps/actions-37-minimum_versions.yaml @@ -23,7 +23,7 @@ dependencies: - pytables=3.5.1 - python-dateutil=2.7.3 - pytz=2017.3 - - pyarrow=0.15 + - pyarrow=0.17.0 - scipy=1.2 - xlrd=1.2.0 - xlsxwriter=1.0.2 diff --git a/ci/deps/actions-37.yaml b/ci/deps/actions-37.yaml index f29830e9b3e79..a209a9099d2bb 100644 --- a/ci/deps/actions-37.yaml +++ b/ci/deps/actions-37.yaml @@ -18,7 +18,7 @@ dependencies: - numpy=1.19 - python-dateutil - nomkl - - pyarrow=0.15.1 + - pyarrow - pytz - s3fs>=0.4.0 - moto>=1.3.14 diff --git a/ci/deps/azure-macos-37.yaml b/ci/deps/azure-macos-37.yaml index 8c8b49ff3df5b..a0b1cdc684d2c 
100644 --- a/ci/deps/azure-macos-37.yaml +++ b/ci/deps/azure-macos-37.yaml @@ -1,6 +1,7 @@ name: pandas-dev channels: - defaults + - conda-forge dependencies: - python=3.7.* @@ -21,7 +22,7 @@ dependencies: - numexpr - numpy=1.17.3 - openpyxl - - pyarrow=0.15.1 + - pyarrow=0.17.0 - pytables - python-dateutil==2.7.3 - pytz diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml index c9d22ffbead45..8266e3bc4d07d 100644 --- a/ci/deps/azure-windows-37.yaml +++ b/ci/deps/azure-windows-37.yaml @@ -26,7 +26,7 @@ dependencies: - numexpr - numpy=1.17.* - openpyxl - - pyarrow=0.15 + - pyarrow=0.17.0 - pytables - python-dateutil - pytz diff --git a/ci/deps/azure-windows-38.yaml b/ci/deps/azure-windows-38.yaml index 661d8813d32d2..200e695a69d1f 100644 --- a/ci/deps/azure-windows-38.yaml +++ b/ci/deps/azure-windows-38.yaml @@ -25,7 +25,7 @@ dependencies: - numpy=1.18.* - openpyxl - jinja2 - - pyarrow>=0.15.0 + - pyarrow>=0.17.0 - pytables - python-dateutil - pytz diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index 16beb00d201b7..ce35e9e15976f 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -358,7 +358,7 @@ PyTables 3.5.1 HDF5-based reading / writing blosc 1.17.0 Compression for HDF5 zlib Compression for HDF5 fastparquet 0.4.0 Parquet reading / writing -pyarrow 0.15.0 Parquet, ORC, and feather reading / writing +pyarrow 0.17.0 Parquet, ORC, and feather reading / writing pyreadstat SPSS files (.sav) reading ========================= ================== ============================================================= diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 622029adf357f..b83a5916a075e 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -578,7 +578,7 @@ Optional libraries below the lowest tested version may still work, but are not c +-----------------+-----------------+---------+ | openpyxl | 
3.0.0 | X | +-----------------+-----------------+---------+ -| pyarrow | 0.15.0 | | +| pyarrow | 0.17.0 | X | +-----------------+-----------------+---------+ | pymysql | 0.8.1 | X | +-----------------+-----------------+---------+ diff --git a/environment.yml b/environment.yml index 67b42d545af88..56a36c593a458 100644 --- a/environment.yml +++ b/environment.yml @@ -100,7 +100,7 @@ dependencies: - odfpy - fastparquet>=0.3.2 # pandas.read_parquet, DataFrame.to_parquet - - pyarrow>=0.15.0 # pandas.read_parquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather + - pyarrow>=0.17.0 # pandas.read_parquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather - python-snappy # required by pyarrow - pyqt>=5.9.2 # pandas.read_clipboard diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py index 0ef6da53191c5..f8eccfeb2c60a 100644 --- a/pandas/compat/_optional.py +++ b/pandas/compat/_optional.py @@ -21,7 +21,7 @@ "odfpy": "1.3.0", "openpyxl": "3.0.0", "pandas_gbq": "0.12.0", - "pyarrow": "0.15.0", + "pyarrow": "0.17.0", "pytest": "5.0.1", "pyxlsb": "1.0.6", "s3fs": "0.4.0", diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py index 6ae3f75069899..7d27b617c0e6e 100644 --- a/pandas/tests/arrays/interval/test_interval.py +++ b/pandas/tests/arrays/interval/test_interval.py @@ -165,7 +165,7 @@ def test_repr(): # Arrow interaction -pyarrow_skip = td.skip_if_no("pyarrow", min_version="0.16.0") +pyarrow_skip = td.skip_if_no("pyarrow") @pyarrow_skip diff --git a/pandas/tests/arrays/masked/test_arrow_compat.py b/pandas/tests/arrays/masked/test_arrow_compat.py index 193017ddfcadf..9f755412dbf39 100644 --- a/pandas/tests/arrays/masked/test_arrow_compat.py +++ b/pandas/tests/arrays/masked/test_arrow_compat.py @@ -6,7 +6,7 @@ import pandas as pd import pandas._testing as tm -pa = pytest.importorskip("pyarrow", minversion="0.15.0") +pa = pytest.importorskip("pyarrow", minversion="0.17.0") from 
pandas.core.arrays._arrow_utils import pyarrow_array_to_numpy_and_mask @@ -21,8 +21,6 @@ def data(request): def test_arrow_array(data): - # protocol added in 0.15.0 - arr = pa.array(data) expected = pa.array( data.to_numpy(object, na_value=None), @@ -31,10 +29,8 @@ def test_arrow_array(data): assert arr.equals(expected) -@td.skip_if_no("pyarrow", min_version="0.16.0") +@td.skip_if_no("pyarrow") def test_arrow_roundtrip(data): - # roundtrip possible from arrow 0.16.0 - df = pd.DataFrame({"a": data}) table = pa.table(df) assert table.field("a").type == str(data.dtype.numpy_dtype) @@ -43,7 +39,7 @@ def test_arrow_roundtrip(data): tm.assert_frame_equal(result, df) -@td.skip_if_no("pyarrow", min_version="0.16.0") +@td.skip_if_no("pyarrow") def test_arrow_load_from_zero_chunks(data): # GH-41040 @@ -58,7 +54,7 @@ def test_arrow_load_from_zero_chunks(data): tm.assert_frame_equal(result, df) -@td.skip_if_no("pyarrow", min_version="0.16.0") +@td.skip_if_no("pyarrow") def test_arrow_from_arrow_uint(): # https://github.com/pandas-dev/pandas/issues/31896 # possible mismatch in types @@ -70,7 +66,7 @@ def test_arrow_from_arrow_uint(): tm.assert_extension_array_equal(result, expected) -@td.skip_if_no("pyarrow", min_version="0.16.0") +@td.skip_if_no("pyarrow") def test_arrow_sliced(data): # https://github.com/pandas-dev/pandas/issues/38525 @@ -165,7 +161,7 @@ def test_pyarrow_array_to_numpy_and_mask(np_dtype_to_arrays): tm.assert_numpy_array_equal(mask, mask_expected_empty) -@td.skip_if_no("pyarrow", min_version="0.16.0") +@td.skip_if_no("pyarrow") def test_from_arrow_type_error(request, data): # ensure that __from_arrow__ returns a TypeError when getting a wrong # array type diff --git a/pandas/tests/arrays/period/test_arrow_compat.py b/pandas/tests/arrays/period/test_arrow_compat.py index d7b0704cdfb05..5211397f20c36 100644 --- a/pandas/tests/arrays/period/test_arrow_compat.py +++ b/pandas/tests/arrays/period/test_arrow_compat.py @@ -11,7 +11,7 @@ period_array, ) -pyarrow_skip = 
pyarrow_skip = td.skip_if_no("pyarrow", min_version="0.16.0") +pyarrow_skip = td.skip_if_no("pyarrow", min_version="0.17.0") @pyarrow_skip diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index 3205664e7c80a..e3b43c544a477 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -437,7 +437,7 @@ def test_fillna_args(dtype, request): arr.fillna(value=1) -@td.skip_if_no("pyarrow", min_version="0.15.0") +@td.skip_if_no("pyarrow") def test_arrow_array(dtype): # protocol added in 0.15.0 import pyarrow as pa @@ -451,7 +451,7 @@ def test_arrow_array(dtype): assert arr.equals(expected) -@td.skip_if_no("pyarrow", min_version="0.16.0") +@td.skip_if_no("pyarrow") def test_arrow_roundtrip(dtype, dtype_object): # roundtrip possible from arrow 1.0.0 import pyarrow as pa @@ -467,7 +467,7 @@ def test_arrow_roundtrip(dtype, dtype_object): assert result.loc[2, "a"] is pd.NA -@td.skip_if_no("pyarrow", min_version="0.16.0") +@td.skip_if_no("pyarrow") def test_arrow_load_from_zero_chunks(dtype, dtype_object): # GH-41040 import pyarrow as pa diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py index a5254f5ff7988..ba8a9ed070236 100644 --- a/pandas/tests/io/test_feather.py +++ b/pandas/tests/io/test_feather.py @@ -6,14 +6,12 @@ import pandas as pd import pandas._testing as tm -from pandas.util.version import Version from pandas.io.feather_format import read_feather, to_feather # isort:skip pyarrow = pytest.importorskip("pyarrow") -pyarrow_version = Version(pyarrow.__version__) filter_sparse = pytest.mark.filterwarnings("ignore:The Sparse") @@ -89,12 +87,11 @@ def test_basic(self): ), } ) - if pyarrow_version >= Version("0.17.0"): - df["periods"] = pd.period_range("2013", freq="M", periods=3) - df["timedeltas"] = pd.timedelta_range("1 day", periods=3) - # TODO temporary disable due to regression in pyarrow 0.17.1 - # 
https://github.com/pandas-dev/pandas/issues/34255 - # df["intervals"] = pd.interval_range(0, 3, 3) + df["periods"] = pd.period_range("2013", freq="M", periods=3) + df["timedeltas"] = pd.timedelta_range("1 day", periods=3) + # TODO temporary disable due to regression in pyarrow 0.17.1 + # https://github.com/pandas-dev/pandas/issues/34255 + # df["intervals"] = pd.interval_range(0, 3, 3) assert df.dttz.dtype.tz.zone == "US/Eastern" self.check_round_trip(df) diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index f66451cd72309..ae6425cd93ac5 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -17,6 +17,10 @@ PY38, is_platform_windows, ) +from pandas.compat.pyarrow import ( + pa_version_under1p0, + pa_version_under2p0, +) import pandas.util._test_decorators as td import pandas as pd @@ -653,8 +657,6 @@ def test_categorical(self, pa): ) def test_s3_roundtrip_explicit_fs(self, df_compat, s3_resource, pa, s3so): s3fs = pytest.importorskip("s3fs") - if Version(pyarrow.__version__) <= Version("0.17.0"): - pytest.skip() s3 = s3fs.S3FileSystem(**s3so) kw = {"filesystem": s3} check_round_trip( @@ -666,8 +668,6 @@ def test_s3_roundtrip_explicit_fs(self, df_compat, s3_resource, pa, s3so): ) def test_s3_roundtrip(self, df_compat, s3_resource, pa, s3so): - if Version(pyarrow.__version__) <= Version("0.17.0"): - pytest.skip() # GH #19134 s3so = {"storage_options": s3so} check_round_trip( @@ -698,14 +698,12 @@ def test_s3_roundtrip_for_dir( # These are added to back of dataframe on read. 
In new API category dtype is # only used if partition field is string, but this changed again to use # category dtype for all types (not only strings) in pyarrow 2.0.0 - pa10 = (Version(pyarrow.__version__) >= Version("1.0.0")) and ( - Version(pyarrow.__version__) < Version("2.0.0") - ) if partition_col: - if pa10: - partition_col_type = "int32" - else: - partition_col_type = "category" + partition_col_type = ( + "int32" + if (not pa_version_under1p0) and pa_version_under2p0 + else "category" + ) expected_df[partition_col] = expected_df[partition_col].astype( partition_col_type @@ -795,7 +793,7 @@ def test_write_with_schema(self, pa): out_df = df.astype(bool) check_round_trip(df, pa, write_kwargs={"schema": schema}, expected=out_df) - @td.skip_if_no("pyarrow", min_version="0.15.0") + @td.skip_if_no("pyarrow") def test_additional_extension_arrays(self, pa): # test additional ExtensionArrays that are supported through the # __arrow_array__ protocol @@ -806,22 +804,10 @@ def test_additional_extension_arrays(self, pa): "c": pd.Series(["a", None, "c"], dtype="string"), } ) - if Version(pyarrow.__version__) >= Version("0.16.0"): - expected = df - else: - # de-serialized as plain int / object - expected = df.assign( - a=df.a.astype("int64"), b=df.b.astype("int64"), c=df.c.astype("object") - ) - check_round_trip(df, pa, expected=expected) + check_round_trip(df, pa) df = pd.DataFrame({"a": pd.Series([1, 2, 3, None], dtype="Int64")}) - if Version(pyarrow.__version__) >= Version("0.16.0"): - expected = df - else: - # if missing values in integer, currently de-serialized as float - expected = df.assign(a=df.a.astype("float64")) - check_round_trip(df, pa, expected=expected) + check_round_trip(df, pa) @td.skip_if_no("pyarrow", min_version="1.0.0") def test_pyarrow_backed_string_array(self, pa): @@ -831,7 +817,7 @@ def test_pyarrow_backed_string_array(self, pa): df = pd.DataFrame({"a": pd.Series(["a", None, "c"], dtype="arrow_string")}) check_round_trip(df, pa, expected=df) - 
@td.skip_if_no("pyarrow", min_version="0.16.0") + @td.skip_if_no("pyarrow") def test_additional_extension_types(self, pa): # test additional ExtensionArrays that are supported through the # __arrow_array__ protocol + by defining a custom ExtensionType @@ -844,7 +830,7 @@ def test_additional_extension_types(self, pa): ) check_round_trip(df, pa) - @td.skip_if_no("pyarrow", min_version="0.16.0") + @td.skip_if_no("pyarrow") def test_use_nullable_dtypes(self, pa): import pyarrow.parquet as pq @@ -880,7 +866,7 @@ def test_timestamp_nanoseconds(self, pa): check_round_trip(df, pa, write_kwargs={"version": "2.0"}) def test_timezone_aware_index(self, pa, timezone_aware_date_list): - if Version(pyarrow.__version__) >= Version("2.0.0"): + if not pa_version_under2p0: # temporary skip this test until it is properly resolved # https://github.com/pandas-dev/pandas/issues/37286 pytest.skip() diff --git a/requirements-dev.txt b/requirements-dev.txt index 35fb6d18a7e81..d1fafbbf9101d 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -65,7 +65,7 @@ xlsxwriter xlwt odfpy fastparquet>=0.3.2 -pyarrow>=0.15.0 +pyarrow>=0.17.0 python-snappy pyqt5>=5.9.2 tables>=3.5.1
closes #38870
https://api.github.com/repos/pandas-dev/pandas/pulls/41476
2021-05-14T16:58:11Z
2021-05-17T15:21:56Z
2021-05-17T15:21:56Z
2021-06-18T02:25:25Z
DEPR: dropping nuisance columns in DataFrameGroupby apply, agg, transform
diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst index ef6d45fa0140b..7a55acbd3031d 100644 --- a/doc/source/user_guide/groupby.rst +++ b/doc/source/user_guide/groupby.rst @@ -1000,6 +1000,7 @@ instance method on each data group. This is pretty easy to do by passing lambda functions: .. ipython:: python + :okwarning: grouped = df.groupby("A") grouped.agg(lambda x: x.std()) @@ -1009,6 +1010,7 @@ arguments. Using a bit of metaprogramming cleverness, GroupBy now has the ability to "dispatch" method calls to the groups: .. ipython:: python + :okwarning: grouped.std() diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index d357e4a633347..e2e05d98845f6 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -720,6 +720,44 @@ For example: A 24 dtype: int64 + +Similarly, when applying a function to :class:`DataFrameGroupBy`, columns on which +the function raises ``TypeError`` are currently silently ignored and dropped +from the result. + +This behavior is deprecated. In a future version, the ``TypeError`` +will be raised, and users will need to select only valid columns before calling +the function. + +For example: + +.. ipython:: python + + df = pd.DataFrame({"A": [1, 2, 3, 4], "B": pd.date_range("2016-01-01", periods=4)}) + gb = df.groupby([1, 1, 2, 2]) + +*Old behavior*: + +.. code-block:: ipython + + In [4]: gb.prod(numeric_only=False) + Out[4]: + A + 1 2 + 2 12 + +.. code-block:: ipython + + In [5]: gb.prod(numeric_only=False) + ... + TypeError: datetime64 type does not support prod operations + + In [6]: gb[["A"]].prod(numeric_only=False) + Out[6]: + A + 1 2 + 2 12 + .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index c38c51d46f83e..dec68ab8f392d 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1087,6 +1087,15 @@ def array_func(values: ArrayLike) -> ArrayLike: if not len(new_mgr) and len(orig): # If the original Manager was already empty, no need to raise raise DataError("No numeric types to aggregate") + if len(new_mgr) < len(data): + warnings.warn( + f"Dropping invalid columns in {type(self).__name__}.{how} " + "is deprecated. In a future version, a TypeError will be raised. " + f"Before calling .{how}, select only columns which should be " + "valid for the function.", + FutureWarning, + stacklevel=4, + ) return self._wrap_agged_manager(new_mgr) @@ -1283,6 +1292,16 @@ def arr_func(bvalues: ArrayLike) -> ArrayLike: res_mgr = mgr.grouped_reduce(arr_func, ignore_failures=True) res_mgr.set_axis(1, mgr.axes[1]) + if len(res_mgr) < len(mgr): + warnings.warn( + f"Dropping invalid columns in {type(self).__name__}.{how} " + "is deprecated. In a future version, a TypeError will be raised. " + f"Before calling .{how}, select only columns which should be " + "valid for the transforming function.", + FutureWarning, + stacklevel=4, + ) + res_df = self.obj._constructor(res_mgr) if self.axis == 1: res_df = res_df.T @@ -1420,7 +1439,14 @@ def _transform_item_by_item(self, obj: DataFrame, wrapper) -> DataFrame: output[i] = sgb.transform(wrapper) except TypeError: # e.g. trying to call nanmean with string values - pass + warnings.warn( + f"Dropping invalid columns in {type(self).__name__}.transform " + "is deprecated. In a future version, a TypeError will be raised. 
" + "Before calling .transform, select only columns which should be " + "valid for the transforming function.", + FutureWarning, + stacklevel=5, + ) else: inds.append(i) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index b27eb4bb8f325..3ce9b9bececf0 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -30,6 +30,7 @@ class providing the base-class of operations. Union, cast, ) +import warnings import numpy as np @@ -1270,6 +1271,14 @@ def _python_agg_general(self, func, *args, **kwargs): # if this function is invalid for this dtype, we will ignore it. result = self.grouper.agg_series(obj, f) except TypeError: + warnings.warn( + f"Dropping invalid columns in {type(self).__name__}.agg " + "is deprecated. In a future version, a TypeError will be raised. " + "Before calling .agg, select only columns which should be " + "valid for the aggregating function.", + FutureWarning, + stacklevel=3, + ) continue key = base.OutputKey(label=name, position=idx) @@ -2829,6 +2838,16 @@ def _get_cythonized_result( vals, inferences = pre_processing(vals) except TypeError as err: error_msg = str(err) + howstr = how.replace("group_", "") + warnings.warn( + "Dropping invalid columns in " + f"{type(self).__name__}.{howstr} is deprecated. " + "In a future version, a TypeError will be raised. 
" + f"Before calling .{howstr}, select only columns which " + "should be valid for the function.", + FutureWarning, + stacklevel=3, + ) continue vals = vals.astype(cython_dtype, copy=False) if needs_2d: diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index 18739320d90d3..eb82e03aea82f 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -257,7 +257,8 @@ def func(ser): else: return ser.sum() - result = grouped.aggregate(func) + with tm.assert_produces_warning(FutureWarning, match="Dropping invalid columns"): + result = grouped.aggregate(func) exp_grouped = three_group.loc[:, three_group.columns != "C"] expected = exp_grouped.groupby(["A", "B"]).aggregate(func) tm.assert_frame_equal(result, expected) @@ -1020,6 +1021,7 @@ def test_mangle_series_groupby(self): tm.assert_frame_equal(result, expected) @pytest.mark.xfail(reason="GH-26611. kwargs for multi-agg.") + @pytest.mark.filterwarnings("ignore:Dropping invalid columns:FutureWarning") def test_with_kwargs(self): f1 = lambda x, y, b=1: x.sum() + y + b f2 = lambda x, y, b=2: x.sum() + y * b diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py index 681192881c301..4d30543355d47 100644 --- a/pandas/tests/groupby/aggregate/test_other.py +++ b/pandas/tests/groupby/aggregate/test_other.py @@ -44,9 +44,16 @@ def test_agg_api(): def peak_to_peak(arr): return arr.max() - arr.min() - expected = grouped.agg([peak_to_peak]) + with tm.assert_produces_warning( + FutureWarning, match="Dropping invalid", check_stacklevel=False + ): + expected = grouped.agg([peak_to_peak]) expected.columns = ["data1", "data2"] - result = grouped.agg(peak_to_peak) + + with tm.assert_produces_warning( + FutureWarning, match="Dropping invalid", check_stacklevel=False + ): + result = grouped.agg(peak_to_peak) tm.assert_frame_equal(result, expected) @@ -294,7 +301,8 @@ def 
raiseException(df): raise TypeError("test") with pytest.raises(TypeError, match="test"): - df.groupby(0).agg(raiseException) + with tm.assert_produces_warning(FutureWarning, match="Dropping invalid"): + df.groupby(0).agg(raiseException) def test_series_agg_multikey(): diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 4fa21a259e7cb..95bb010015f62 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -87,13 +87,15 @@ def test_max_min_object_multiple_columns(using_array_manager): gb = df.groupby("A") - result = gb.max(numeric_only=False) + with tm.assert_produces_warning(FutureWarning, match="Dropping invalid"): + result = gb.max(numeric_only=False) # "max" is valid for column "C" but not for "B" ei = Index([1, 2, 3], name="A") expected = DataFrame({"C": ["b", "d", "e"]}, index=ei) tm.assert_frame_equal(result, expected) - result = gb.min(numeric_only=False) + with tm.assert_produces_warning(FutureWarning, match="Dropping invalid"): + result = gb.min(numeric_only=False) # "min" is valid for column "C" but not for "B" ei = Index([1, 2, 3], name="A") expected = DataFrame({"C": ["a", "c", "e"]}, index=ei) @@ -221,7 +223,10 @@ def test_averages(self, df, method): ], ) - result = getattr(gb, method)(numeric_only=False) + with tm.assert_produces_warning( + FutureWarning, match="Dropping invalid", check_stacklevel=False + ): + result = getattr(gb, method)(numeric_only=False) tm.assert_frame_equal(result.reindex_like(expected), expected) expected_columns = expected.columns @@ -303,10 +308,27 @@ def test_cummin_cummax(self, df, method): def _check(self, df, method, expected_columns, expected_columns_numeric): gb = df.groupby("group") - result = getattr(gb, method)() + # cummin, cummax dont have numeric_only kwarg, always use False + warn = None + if method in ["cummin", "cummax"]: + # these dont have numeric_only kwarg, always use False + warn = FutureWarning + elif method in ["min", 
"max"]: + # these have numeric_only kwarg, but default to False + warn = FutureWarning + + with tm.assert_produces_warning(warn, match="Dropping invalid columns"): + result = getattr(gb, method)() + tm.assert_index_equal(result.columns, expected_columns_numeric) - result = getattr(gb, method)(numeric_only=False) + # GH#41475 deprecated silently ignoring nuisance columns + warn = None + if len(expected_columns) < len(gb._obj_with_exclusions.columns): + warn = FutureWarning + with tm.assert_produces_warning(warn, match="Dropping invalid columns"): + result = getattr(gb, method)(numeric_only=False) + tm.assert_index_equal(result.columns, expected_columns) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index c37dc17b85dd2..29402d6b8d538 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -923,7 +923,8 @@ def aggfun(ser): else: return ser.sum() - agged2 = df.groupby(keys).aggregate(aggfun) + with tm.assert_produces_warning(FutureWarning, match="Dropping invalid columns"): + agged2 = df.groupby(keys).aggregate(aggfun) assert len(agged2.columns) + 1 == len(df.columns) @@ -1757,6 +1758,7 @@ def test_pivot_table_values_key_error(): @pytest.mark.parametrize( "op", ["idxmax", "idxmin", "mad", "min", "max", "sum", "prod", "skew"] ) +@pytest.mark.filterwarnings("ignore:Dropping invalid columns:FutureWarning") @pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning") def test_empty_groupby(columns, keys, values, method, op, request): # GH8093 & GH26411 diff --git a/pandas/tests/groupby/test_quantile.py b/pandas/tests/groupby/test_quantile.py index 9c9d1aa881890..90437b9139594 100644 --- a/pandas/tests/groupby/test_quantile.py +++ b/pandas/tests/groupby/test_quantile.py @@ -155,7 +155,10 @@ def test_quantile_raises(): df = DataFrame([["foo", "a"], ["foo", "b"], ["foo", "c"]], columns=["key", "val"]) with pytest.raises(TypeError, match="cannot be performed against 'object' dtypes"): 
- df.groupby("key").quantile() + with tm.assert_produces_warning( + FutureWarning, match="Dropping invalid columns" + ): + df.groupby("key").quantile() def test_quantile_out_of_bounds_q_raises(): @@ -236,7 +239,11 @@ def test_groupby_quantile_nullable_array(values, q): @pytest.mark.parametrize("q", [0.5, [0.0, 0.5, 1.0]]) def test_groupby_quantile_skips_invalid_dtype(q): df = DataFrame({"a": [1], "b": [2.0], "c": ["x"]}) - result = df.groupby("a").quantile(q) + + warn = None if isinstance(q, list) else FutureWarning + with tm.assert_produces_warning(warn, match="Dropping invalid columns"): + result = df.groupby("a").quantile(q) + expected = df.groupby("a")[["b"]].quantile(q) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index 09317cbeec658..1949d03998512 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -409,7 +409,9 @@ def test_transform_exclude_nuisance(df, duplicates): grouped = df.groupby("A") gbc = grouped["C"] - expected["C"] = gbc.transform(np.mean) + warn = FutureWarning if duplicates else None + with tm.assert_produces_warning(warn, match="Dropping invalid columns"): + expected["C"] = gbc.transform(np.mean) if duplicates: # squeeze 1-column DataFrame down to Series expected["C"] = expected["C"]["C"] @@ -422,14 +424,16 @@ def test_transform_exclude_nuisance(df, duplicates): expected["D"] = grouped["D"].transform(np.mean) expected = DataFrame(expected) - result = df.groupby("A").transform(np.mean) + with tm.assert_produces_warning(FutureWarning, match="Dropping invalid columns"): + result = df.groupby("A").transform(np.mean) tm.assert_frame_equal(result, expected) def test_transform_function_aliases(df): - result = df.groupby("A").transform("mean") - expected = df.groupby("A").transform(np.mean) + with tm.assert_produces_warning(FutureWarning, match="Dropping invalid columns"): + 
result = df.groupby("A").transform("mean") + expected = df.groupby("A").transform(np.mean) tm.assert_frame_equal(result, expected) result = df.groupby("A")["C"].transform("mean") @@ -498,7 +502,10 @@ def test_groupby_transform_with_int(): } ) with np.errstate(all="ignore"): - result = df.groupby("A").transform(lambda x: (x - x.mean()) / x.std()) + with tm.assert_produces_warning( + FutureWarning, match="Dropping invalid columns" + ): + result = df.groupby("A").transform(lambda x: (x - x.mean()) / x.std()) expected = DataFrame( {"B": np.nan, "C": Series([-1, 0, 1, -1, 0, 1], dtype="float64")} ) @@ -514,7 +521,10 @@ def test_groupby_transform_with_int(): } ) with np.errstate(all="ignore"): - result = df.groupby("A").transform(lambda x: (x - x.mean()) / x.std()) + with tm.assert_produces_warning( + FutureWarning, match="Dropping invalid columns" + ): + result = df.groupby("A").transform(lambda x: (x - x.mean()) / x.std()) expected = DataFrame({"B": np.nan, "C": [-1.0, 0.0, 1.0, -1.0, 0.0, 1.0]}) tm.assert_frame_equal(result, expected) @@ -522,7 +532,10 @@ def test_groupby_transform_with_int(): s = Series([2, 3, 4, 10, 5, -1]) df = DataFrame({"A": [1, 1, 1, 2, 2, 2], "B": 1, "C": s, "D": "foo"}) with np.errstate(all="ignore"): - result = df.groupby("A").transform(lambda x: (x - x.mean()) / x.std()) + with tm.assert_produces_warning( + FutureWarning, match="Dropping invalid columns" + ): + result = df.groupby("A").transform(lambda x: (x - x.mean()) / x.std()) s1 = s.iloc[0:3] s1 = (s1 - s1.mean()) / s1.std() @@ -532,7 +545,8 @@ def test_groupby_transform_with_int(): tm.assert_frame_equal(result, expected) # int doesn't get downcasted - result = df.groupby("A").transform(lambda x: x * 2 / 2) + with tm.assert_produces_warning(FutureWarning, match="Dropping invalid columns"): + result = df.groupby("A").transform(lambda x: x * 2 / 2) expected = DataFrame({"B": 1.0, "C": [2.0, 3.0, 4.0, 10.0, 5.0, -1.0]}) tm.assert_frame_equal(result, expected) @@ -791,7 +805,11 @@ def 
test_transform_numeric_ret(cols, exp, comp_func, agg_func, request): {"a": date_range("2018-01-01", periods=3), "b": range(3), "c": range(7, 10)} ) - result = df.groupby("b")[cols].transform(agg_func) + warn = FutureWarning + if isinstance(exp, Series) or agg_func != "size": + warn = None + with tm.assert_produces_warning(warn, match="Dropping invalid columns"): + result = df.groupby("b")[cols].transform(agg_func) if agg_func == "rank": exp = exp.astype("float") @@ -1103,7 +1121,12 @@ def test_transform_agg_by_name(request, reduction_func, obj): args = {"nth": [0], "quantile": [0.5], "corrwith": [obj]}.get(func, []) - result = g.transform(func, *args) + warn = None + if isinstance(obj, DataFrame) and func == "size": + warn = FutureWarning + + with tm.assert_produces_warning(warn, match="Dropping invalid columns"): + result = g.transform(func, *args) # this is the *definition* of a transformation tm.assert_index_equal(result.index, obj.index)
- [x] closes #21664 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry Discussed on this week's call.
https://api.github.com/repos/pandas-dev/pandas/pulls/41475
2021-05-14T16:52:37Z
2021-05-26T01:53:59Z
2021-05-26T01:53:59Z
2022-02-19T17:29:08Z
TST/CLN: move some tests
diff --git a/pandas/tests/strings/test_case_justify.py b/pandas/tests/strings/test_case_justify.py index d6e2ca7399b4e..24e1f99ad7956 100644 --- a/pandas/tests/strings/test_case_justify.py +++ b/pandas/tests/strings/test_case_justify.py @@ -81,6 +81,15 @@ def test_swapcase_mixed_object(): tm.assert_series_equal(result, expected) +def test_casefold(): + # GH25405 + expected = Series(["ss", np.nan, "case", "ssd"]) + s = Series(["ß", np.nan, "case", "ßd"]) + result = s.str.casefold() + + tm.assert_series_equal(result, expected) + + def test_casemethods(any_string_dtype): values = ["aaa", "bbb", "CCC", "Dddd", "eEEE"] s = Series(values, dtype=any_string_dtype) diff --git a/pandas/tests/strings/test_get_dummies.py b/pandas/tests/strings/test_get_dummies.py new file mode 100644 index 0000000000000..31386e4e342ae --- /dev/null +++ b/pandas/tests/strings/test_get_dummies.py @@ -0,0 +1,53 @@ +import numpy as np + +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + _testing as tm, +) + + +def test_get_dummies(any_string_dtype): + s = Series(["a|b", "a|c", np.nan], dtype=any_string_dtype) + result = s.str.get_dummies("|") + expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]], columns=list("abc")) + tm.assert_frame_equal(result, expected) + + s = Series(["a;b", "a", 7], dtype=any_string_dtype) + result = s.str.get_dummies(";") + expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]], columns=list("7ab")) + tm.assert_frame_equal(result, expected) + + +def test_get_dummies_index(): + # GH9980, GH8028 + idx = Index(["a|b", "a|c", "b|c"]) + result = idx.str.get_dummies("|") + + expected = MultiIndex.from_tuples( + [(1, 1, 0), (1, 0, 1), (0, 1, 1)], names=("a", "b", "c") + ) + tm.assert_index_equal(result, expected) + + +def test_get_dummies_with_name_dummy(any_string_dtype): + # GH 12180 + # Dummies named 'name' should work as expected + s = Series(["a", "b,name", "b"], dtype=any_string_dtype) + result = s.str.get_dummies(",") + expected = DataFrame([[1, 0, 
0], [0, 1, 1], [0, 1, 0]], columns=["a", "b", "name"]) + tm.assert_frame_equal(result, expected) + + +def test_get_dummies_with_name_dummy_index(): + # GH 12180 + # Dummies named 'name' should work as expected + idx = Index(["a|b", "name|c", "b|name"]) + result = idx.str.get_dummies("|") + + expected = MultiIndex.from_tuples( + [(1, 1, 0, 0), (0, 0, 1, 1), (0, 1, 0, 1)], names=("a", "b", "c", "name") + ) + tm.assert_index_equal(result, expected) diff --git a/pandas/tests/strings/test_strings.py b/pandas/tests/strings/test_strings.py index 80010de047cd5..42d81154dea0f 100644 --- a/pandas/tests/strings/test_strings.py +++ b/pandas/tests/strings/test_strings.py @@ -303,50 +303,6 @@ def test_isnumeric(any_string_dtype): tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e, dtype=dtype)) -def test_get_dummies(any_string_dtype): - s = Series(["a|b", "a|c", np.nan], dtype=any_string_dtype) - result = s.str.get_dummies("|") - expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]], columns=list("abc")) - tm.assert_frame_equal(result, expected) - - s = Series(["a;b", "a", 7], dtype=any_string_dtype) - result = s.str.get_dummies(";") - expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]], columns=list("7ab")) - tm.assert_frame_equal(result, expected) - - -def test_get_dummies_index(): - # GH9980, GH8028 - idx = Index(["a|b", "a|c", "b|c"]) - result = idx.str.get_dummies("|") - - expected = MultiIndex.from_tuples( - [(1, 1, 0), (1, 0, 1), (0, 1, 1)], names=("a", "b", "c") - ) - tm.assert_index_equal(result, expected) - - -def test_get_dummies_with_name_dummy(any_string_dtype): - # GH 12180 - # Dummies named 'name' should work as expected - s = Series(["a", "b,name", "b"], dtype=any_string_dtype) - result = s.str.get_dummies(",") - expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]], columns=["a", "b", "name"]) - tm.assert_frame_equal(result, expected) - - -def test_get_dummies_with_name_dummy_index(): - # GH 12180 - # Dummies named 'name' should work as expected - 
idx = Index(["a|b", "name|c", "b|name"]) - result = idx.str.get_dummies("|") - - expected = MultiIndex.from_tuples( - [(1, 1, 0, 0), (0, 0, 1, 1), (0, 1, 0, 1)], names=("a", "b", "c", "name") - ) - tm.assert_index_equal(result, expected) - - def test_join(): values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"]) result = values.str.split("_").str.join("_") @@ -782,15 +738,6 @@ def test_method_on_bytes(): lhs.str.cat(rhs) -def test_casefold(): - # GH25405 - expected = Series(["ss", np.nan, "case", "ssd"]) - s = Series(["ß", np.nan, "case", "ßd"]) - result = s.str.casefold() - - tm.assert_series_equal(result, expected) - - def test_str_accessor_in_apply_func(): # https://github.com/pandas-dev/pandas/issues/38979 df = DataFrame(zip("abc", "def"))
no changes, only moved. maybe could split pandas/tests/strings/test_strings.py by method
https://api.github.com/repos/pandas-dev/pandas/pulls/41474
2021-05-14T16:04:44Z
2021-05-14T17:11:30Z
2021-05-14T17:11:30Z
2021-05-14T17:14:48Z
[ArrowStringArray] TST: move/combine a couple of tests
diff --git a/pandas/tests/strings/test_case_justify.py b/pandas/tests/strings/test_case_justify.py index d6e2ca7399b4e..73f6b2e9a1deb 100644 --- a/pandas/tests/strings/test_case_justify.py +++ b/pandas/tests/strings/test_case_justify.py @@ -49,10 +49,21 @@ def test_lower_upper_mixed_object(): tm.assert_series_equal(result, expected) -def test_capitalize(any_string_dtype): - s = Series(["FOO", "BAR", np.nan, "Blah", "blurg"], dtype=any_string_dtype) +@pytest.mark.parametrize( + "data, expected", + [ + ( + ["FOO", "BAR", np.nan, "Blah", "blurg"], + ["Foo", "Bar", np.nan, "Blah", "Blurg"], + ), + (["a", "b", "c"], ["A", "B", "C"]), + (["a b", "a bc. de"], ["A b", "A bc. de"]), + ], +) +def test_capitalize(data, expected, any_string_dtype): + s = Series(data, dtype=any_string_dtype) result = s.str.capitalize() - expected = Series(["Foo", "Bar", np.nan, "Blah", "Blurg"], dtype=any_string_dtype) + expected = Series(expected, dtype=any_string_dtype) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/strings/test_split_partition.py b/pandas/tests/strings/test_split_partition.py index e59105eccc67c..f3f5acd0d2f1c 100644 --- a/pandas/tests/strings/test_split_partition.py +++ b/pandas/tests/strings/test_split_partition.py @@ -614,56 +614,61 @@ def test_partition_sep_kwarg(any_string_dtype): def test_get(): - values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"]) - - result = values.str.split("_").str.get(1) + ser = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"]) + result = ser.str.split("_").str.get(1) expected = Series(["b", "d", np.nan, "g"]) tm.assert_series_equal(result, expected) - # mixed - mixed = Series(["a_b_c", np.nan, "c_d_e", True, datetime.today(), None, 1, 2.0]) - rs = Series(mixed).str.split("_").str.get(1) - xp = Series(["b", np.nan, "d", np.nan, np.nan, np.nan, np.nan, np.nan]) +def test_get_mixed_object(): + ser = Series(["a_b_c", np.nan, "c_d_e", True, datetime.today(), None, 1, 2.0]) + result = ser.str.split("_").str.get(1) + expected = 
Series(["b", np.nan, "d", np.nan, np.nan, np.nan, np.nan, np.nan]) + tm.assert_series_equal(result, expected) - assert isinstance(rs, Series) - tm.assert_almost_equal(rs, xp) - # bounds testing - values = Series(["1_2_3_4_5", "6_7_8_9_10", "11_12"]) +def test_get_bounds(): + ser = Series(["1_2_3_4_5", "6_7_8_9_10", "11_12"]) # positive index - result = values.str.split("_").str.get(2) + result = ser.str.split("_").str.get(2) expected = Series(["3", "8", np.nan]) tm.assert_series_equal(result, expected) # negative index - result = values.str.split("_").str.get(-3) + result = ser.str.split("_").str.get(-3) expected = Series(["3", "8", np.nan]) tm.assert_series_equal(result, expected) def test_get_complex(): # GH 20671, getting value not in dict raising `KeyError` - values = Series([(1, 2, 3), [1, 2, 3], {1, 2, 3}, {1: "a", 2: "b", 3: "c"}]) + ser = Series([(1, 2, 3), [1, 2, 3], {1, 2, 3}, {1: "a", 2: "b", 3: "c"}]) - result = values.str.get(1) + result = ser.str.get(1) expected = Series([2, 2, np.nan, "a"]) tm.assert_series_equal(result, expected) - result = values.str.get(-1) + result = ser.str.get(-1) expected = Series([3, 3, np.nan, np.nan]) tm.assert_series_equal(result, expected) @pytest.mark.parametrize("to_type", [tuple, list, np.array]) def test_get_complex_nested(to_type): - values = Series([to_type([to_type([1, 2])])]) + ser = Series([to_type([to_type([1, 2])])]) - result = values.str.get(0) + result = ser.str.get(0) expected = Series([to_type([1, 2])]) tm.assert_series_equal(result, expected) - result = values.str.get(1) + result = ser.str.get(1) expected = Series([np.nan]) tm.assert_series_equal(result, expected) + + +def test_get_strings(any_string_dtype): + ser = Series(["a", "ab", np.nan, "abc"], dtype=any_string_dtype) + result = ser.str.get(2) + expected = Series([np.nan, np.nan, np.nan, "c"], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/strings/test_string_array.py 
b/pandas/tests/strings/test_string_array.py index f90d219159c7e..0de93b479e43e 100644 --- a/pandas/tests/strings/test_string_array.py +++ b/pandas/tests/strings/test_string_array.py @@ -1,11 +1,8 @@ -import operator - import numpy as np import pytest from pandas._libs import lib -import pandas as pd from pandas import ( DataFrame, Series, @@ -99,27 +96,3 @@ def test_string_array_extract(nullable_string_dtype): result = result.astype(object) tm.assert_equal(result, expected) - - -def test_str_get_stringarray_multiple_nans(nullable_string_dtype): - s = Series(pd.array(["a", "ab", pd.NA, "abc"], dtype=nullable_string_dtype)) - result = s.str.get(2) - expected = Series(pd.array([pd.NA, pd.NA, pd.NA, "c"], dtype=nullable_string_dtype)) - tm.assert_series_equal(result, expected) - - -@pytest.mark.parametrize( - "input, method", - [ - (["a", "b", "c"], operator.methodcaller("capitalize")), - (["a b", "a bc. de"], operator.methodcaller("capitalize")), - ], -) -def test_capitalize(input, method, nullable_string_dtype): - a = Series(input, dtype=nullable_string_dtype) - b = Series(input, dtype="object") - result = method(a.str) - expected = method(b.str) - - assert result.dtype.name == nullable_string_dtype - tm.assert_series_equal(result.astype(object), expected)
separate commits to simplify review (we may be able to remove pandas/tests/strings/test_string_array.py eventually)
https://api.github.com/repos/pandas-dev/pandas/pulls/41473
2021-05-14T15:48:45Z
2021-05-17T15:31:31Z
2021-05-17T15:31:31Z
2021-05-17T15:56:23Z
REF: collect methods in NumericIndex
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 88b0b019324ea..de7c522b4fbec 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -16,7 +16,10 @@ Dtype, DtypeObj, ) -from pandas.util._decorators import doc +from pandas.util._decorators import ( + cache_readonly, + doc, +) from pandas.core.dtypes.cast import astype_nansafe from pandas.core.dtypes.common import ( @@ -43,6 +46,40 @@ _num_index_shared_docs = {} +_num_index_shared_docs[ + "class_descr" +] = """ + Immutable sequence used for indexing and alignment. The basic object + storing axis labels for all pandas objects. %(klass)s is a special case + of `Index` with purely %(ltype)s labels. %(extra)s. + + Parameters + ---------- + data : array-like (1-dimensional) + dtype : NumPy dtype (default: %(dtype)s) + copy : bool + Make a copy of input ndarray. + name : object + Name to be stored in the index. + + Attributes + ---------- + None + + Methods + ------- + None + + See Also + -------- + Index : The base pandas Index type. + + Notes + ----- + An Index instance can **only** contain hashable objects. +""" + + class NumericIndex(Index): """ Provide numeric type operations. @@ -50,6 +87,12 @@ class NumericIndex(Index): This is an abstract class. 
""" + _index_descr_args = { + "klass": "NumericIndex", + "ltype": "integer or float", + "dtype": "inferred", + "extra": "", + } _values: np.ndarray _default_dtype: np.dtype _dtype_validation_metadata: tuple[Callable[..., bool], str] @@ -57,6 +100,36 @@ class NumericIndex(Index): _is_numeric_dtype = True _can_hold_strings = False + @cache_readonly + def _can_hold_na(self) -> bool: + if is_float_dtype(self.dtype): + return True + else: + return False + + @cache_readonly + def _engine_type(self): + return { + np.int8: libindex.Int8Engine, + np.int16: libindex.Int16Engine, + np.int32: libindex.Int32Engine, + np.int64: libindex.Int64Engine, + np.uint8: libindex.UInt8Engine, + np.uint16: libindex.UInt16Engine, + np.uint32: libindex.UInt32Engine, + np.uint64: libindex.UInt64Engine, + np.float32: libindex.Float32Engine, + np.float64: libindex.Float64Engine, + }[self.dtype.type] + + @cache_readonly + def inferred_type(self) -> str: + return { + "i": "integer", + "u": "integer", + "f": "floating", + }[self.dtype.kind] + def __new__(cls, data=None, dtype: Dtype | None = None, copy=False, name=None): name = maybe_extract_name(name, data, cls) @@ -84,8 +157,10 @@ def _ensure_array(cls, data, dtype, copy: bool): if issubclass(data.dtype.type, str): cls._string_data_error(data) - if copy or not is_dtype_equal(data.dtype, cls._default_dtype): - subarr = np.array(data, dtype=cls._default_dtype, copy=copy) + dtype = cls._ensure_dtype(dtype) + + if copy or not is_dtype_equal(data.dtype, dtype): + subarr = np.array(data, dtype=dtype, copy=copy) cls._assert_safe_casting(data, subarr) else: subarr = data @@ -108,9 +183,65 @@ def _validate_dtype(cls, dtype: Dtype | None) -> None: f"Incorrect `dtype` passed: expected {expected}, received {dtype}" ) + @classmethod + def _ensure_dtype( + cls, + dtype: Dtype | None, + ) -> np.dtype | None: + """Ensure int64 dtype for Int64Index, etc. 
Assumed dtype is validated.""" + return cls._default_dtype + + def __contains__(self, key) -> bool: + """ + Check if key is a float and has a decimal. If it has, return False. + """ + if not is_integer_dtype(self.dtype): + return super().__contains__(key) + + hash(key) + try: + if is_float(key) and int(key) != key: + # otherwise the `key in self._engine` check casts e.g. 1.1 -> 1 + return False + return key in self._engine + except (OverflowError, TypeError, ValueError): + return False + + @doc(Index.astype) + def astype(self, dtype, copy=True): + if is_float_dtype(self.dtype): + dtype = pandas_dtype(dtype) + if needs_i8_conversion(dtype): + raise TypeError( + f"Cannot convert Float64Index to dtype {dtype}; integer " + "values are required for conversion" + ) + elif is_integer_dtype(dtype) and not is_extension_array_dtype(dtype): + # TODO(jreback); this can change once we have an EA Index type + # GH 13149 + arr = astype_nansafe(self._values, dtype=dtype) + return Int64Index(arr, name=self.name) + + return super().astype(dtype, copy=copy) + # ---------------------------------------------------------------- # Indexing Methods + @doc(Index._should_fallback_to_positional) + def _should_fallback_to_positional(self) -> bool: + return False + + @doc(Index._convert_slice_indexer) + def _convert_slice_indexer(self, key: slice, kind: str): + if is_float_dtype(self.dtype): + assert kind in ["loc", "getitem"] + + # We always treat __getitem__ slicing as label-based + # translate to locations + return self.slice_indexer(key.start, key.stop, key.step, kind=kind) + + return super()._convert_slice_indexer(key, kind=kind) + @doc(Index._maybe_cast_slice_bound) def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default): assert kind in ["loc", "getitem", None, lib.no_default] @@ -119,6 +250,21 @@ def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default): # we will try to coerce to integers return self._maybe_cast_indexer(label) + 
@doc(Index._convert_arr_indexer) + def _convert_arr_indexer(self, keyarr) -> np.ndarray: + if not is_unsigned_integer_dtype(self.dtype): + return super()._convert_arr_indexer(keyarr) + + # Cast the indexer to uint64 if possible so that the values returned + # from indexing are also uint64. + dtype = None + if is_integer_dtype(keyarr) or ( + lib.infer_dtype(keyarr, skipna=False) == "integer" + ): + dtype = np.dtype(np.uint64) + + return com.asarray_tuplesafe(keyarr, dtype=dtype) + # ---------------------------------------------------------------- @doc(Index._shallow_copy) @@ -150,13 +296,16 @@ def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: return is_numeric_dtype(dtype) @classmethod - def _assert_safe_casting(cls, data, subarr): + def _assert_safe_casting(cls, data: np.ndarray, subarr: np.ndarray) -> None: """ - Subclasses need to override this only if the process of casting data - from some accepted dtype to the internal dtype(s) bears the risk of - truncation (e.g. float to int). + Ensure incoming data can be represented with matching signed-ness. + + Needed if the process of casting data from some accepted dtype to the internal + dtype(s) bears the risk of truncation (e.g. float to int). """ - pass + if is_integer_dtype(subarr.dtype): + if not np.array_equal(data, subarr): + raise TypeError("Unsafe NumPy casting, you must explicitly cast") @property def _is_all_dates(self) -> bool: @@ -165,46 +314,29 @@ def _is_all_dates(self) -> bool: """ return False + def _format_native_types( + self, na_rep="", float_format=None, decimal=".", quoting=None, **kwargs + ): + from pandas.io.formats.format import FloatArrayFormatter -_num_index_shared_docs[ - "class_descr" -] = """ - Immutable sequence used for indexing and alignment. The basic object - storing axis labels for all pandas objects. %(klass)s is a special case - of `Index` with purely %(ltype)s labels. %(extra)s. 
- - Parameters - ---------- - data : array-like (1-dimensional) - dtype : NumPy dtype (default: %(dtype)s) - copy : bool - Make a copy of input ndarray. - name : object - Name to be stored in the index. - - Attributes - ---------- - None - - Methods - ------- - None - - See Also - -------- - Index : The base pandas Index type. - - Notes - ----- - An Index instance can **only** contain hashable objects. -""" + if is_float_dtype(self.dtype): + formatter = FloatArrayFormatter( + self._values, + na_rep=na_rep, + float_format=float_format, + decimal=decimal, + quoting=quoting, + fixed_width=False, + ) + return formatter.get_result_as_array() -_int64_descr_args = { - "klass": "Int64Index", - "ltype": "integer", - "dtype": "int64", - "extra": "", -} + return super()._format_native_types( + na_rep=na_rep, + float_format=float_format, + decimal=decimal, + quoting=quoting, + **kwargs, + ) class IntegerIndex(NumericIndex): @@ -212,38 +344,6 @@ class IntegerIndex(NumericIndex): This is an abstract class for Int64Index, UInt64Index. """ - _default_dtype: np.dtype - _can_hold_na = False - - @classmethod - def _assert_safe_casting(cls, data, subarr): - """ - Ensure incoming data can be represented with matching signed-ness. - """ - if data.dtype.kind != cls._default_dtype.kind: - if not np.array_equal(data, subarr): - raise TypeError("Unsafe NumPy casting, you must explicitly cast") - - def __contains__(self, key) -> bool: - """ - Check if key is a float and has a decimal. If it has, return False. - """ - hash(key) - try: - if is_float(key) and int(key) != key: - # otherwise the `key in self._engine` check casts e.g. 
1.1 -> 1 - return False - return key in self._engine - except (OverflowError, TypeError, ValueError): - return False - - @property - def inferred_type(self) -> str: - """ - Always 'integer' for ``Int64Index`` and ``UInt64Index`` - """ - return "integer" - @property def asi8(self) -> np.ndarray: # do not cache or you'll create a memory leak @@ -256,7 +356,13 @@ def asi8(self) -> np.ndarray: class Int64Index(IntegerIndex): - __doc__ = _num_index_shared_docs["class_descr"] % _int64_descr_args + _index_descr_args = { + "klass": "Int64Index", + "ltype": "integer", + "dtype": "int64", + "extra": "", + } + __doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args _typ = "int64index" _engine_type = libindex.Int64Engine @@ -264,104 +370,31 @@ class Int64Index(IntegerIndex): _dtype_validation_metadata = (is_signed_integer_dtype, "signed integer") -_uint64_descr_args = { - "klass": "UInt64Index", - "ltype": "unsigned integer", - "dtype": "uint64", - "extra": "", -} - - class UInt64Index(IntegerIndex): - __doc__ = _num_index_shared_docs["class_descr"] % _uint64_descr_args + _index_descr_args = { + "klass": "UInt64Index", + "ltype": "unsigned integer", + "dtype": "uint64", + "extra": "", + } + __doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args _typ = "uint64index" _engine_type = libindex.UInt64Engine _default_dtype = np.dtype(np.uint64) _dtype_validation_metadata = (is_unsigned_integer_dtype, "unsigned integer") - # ---------------------------------------------------------------- - # Indexing Methods - - @doc(Index._convert_arr_indexer) - def _convert_arr_indexer(self, keyarr): - # Cast the indexer to uint64 if possible so that the values returned - # from indexing are also uint64. 
- dtype = None - if is_integer_dtype(keyarr) or ( - lib.infer_dtype(keyarr, skipna=False) == "integer" - ): - dtype = np.dtype(np.uint64) - - return com.asarray_tuplesafe(keyarr, dtype=dtype) - - -_float64_descr_args = { - "klass": "Float64Index", - "dtype": "float64", - "ltype": "float", - "extra": "", -} - class Float64Index(NumericIndex): - __doc__ = _num_index_shared_docs["class_descr"] % _float64_descr_args + _index_descr_args = { + "klass": "Float64Index", + "dtype": "float64", + "ltype": "float", + "extra": "", + } + __doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args _typ = "float64index" _engine_type = libindex.Float64Engine _default_dtype = np.dtype(np.float64) _dtype_validation_metadata = (is_float_dtype, "float") - - @property - def inferred_type(self) -> str: - """ - Always 'floating' for ``Float64Index`` - """ - return "floating" - - @doc(Index.astype) - def astype(self, dtype, copy=True): - dtype = pandas_dtype(dtype) - if needs_i8_conversion(dtype): - raise TypeError( - f"Cannot convert Float64Index to dtype {dtype}; integer " - "values are required for conversion" - ) - elif is_integer_dtype(dtype) and not is_extension_array_dtype(dtype): - # TODO(jreback); this can change once we have an EA Index type - # GH 13149 - arr = astype_nansafe(self._values, dtype=dtype) - return Int64Index(arr, name=self.name) - return super().astype(dtype, copy=copy) - - # ---------------------------------------------------------------- - # Indexing Methods - - @doc(Index._should_fallback_to_positional) - def _should_fallback_to_positional(self) -> bool: - return False - - @doc(Index._convert_slice_indexer) - def _convert_slice_indexer(self, key: slice, kind: str): - assert kind in ["loc", "getitem"] - - # We always treat __getitem__ slicing as label-based - # translate to locations - return self.slice_indexer(key.start, key.stop, key.step) - - # ---------------------------------------------------------------- - - def _format_native_types( - self, 
na_rep="", float_format=None, decimal=".", quoting=None, **kwargs - ): - from pandas.io.formats.format import FloatArrayFormatter - - formatter = FloatArrayFormatter( - self._values, - na_rep=na_rep, - float_format=float_format, - decimal=decimal, - quoting=quoting, - fixed_width=False, - ) - return formatter.get_result_as_array() diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 8a91ba22fcba1..0e6fb77e8b51b 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -97,7 +97,6 @@ class RangeIndex(NumericIndex): _typ = "rangeindex" _engine_type = libindex.Int64Engine _dtype_validation_metadata = (is_signed_integer_dtype, "signed integer") - _can_hold_na = False _range: range # --------------------------------------------------------------------
Seperates the refactoring of existing methods out of #41153. Will make it easier to see what #41153 actually brings in of new things. This PR changes no functionality, but only makes the exisiting numeric indexes into thin subclasses of the existing `NumericIndex`.
https://api.github.com/repos/pandas-dev/pandas/pulls/41472
2021-05-14T14:58:28Z
2021-05-18T13:05:52Z
2021-05-18T13:05:52Z
2021-05-18T13:56:07Z
[ArrowStringArray] TST: parametrize tests/strings/test_find_replace.py
diff --git a/pandas/tests/strings/test_find_replace.py b/pandas/tests/strings/test_find_replace.py index 06a7c6d56a61d..843b0ba55e691 100644 --- a/pandas/tests/strings/test_find_replace.py +++ b/pandas/tests/strings/test_find_replace.py @@ -6,7 +6,6 @@ import pandas as pd from pandas import ( - Index, Series, _testing as tm, ) @@ -273,15 +272,14 @@ def test_replace_unicode(any_string_dtype): tm.assert_series_equal(result, expected) -@pytest.mark.parametrize("klass", [Series, Index]) @pytest.mark.parametrize("repl", [None, 3, {"a": "b"}]) @pytest.mark.parametrize("data", [["a", "b", None], ["a", "b", "c", "ad"]]) -def test_replace_raises(any_string_dtype, klass, repl, data): +def test_replace_raises(any_string_dtype, index_or_series, repl, data): # https://github.com/pandas-dev/pandas/issues/13438 msg = "repl must be a string or callable" - values = klass(data, dtype=any_string_dtype) + obj = index_or_series(data, dtype=any_string_dtype) with pytest.raises(TypeError, match=msg): - values.str.replace("a", repl) + obj.str.replace("a", repl) def test_replace_callable(any_string_dtype): @@ -486,39 +484,32 @@ def test_match_case_kwarg(any_string_dtype): tm.assert_series_equal(result, expected) -def test_fullmatch(): +def test_fullmatch(any_string_dtype): # GH 32806 - ser = Series(["fooBAD__barBAD", "BAD_BADleroybrown", np.nan, "foo"]) + ser = Series( + ["fooBAD__barBAD", "BAD_BADleroybrown", np.nan, "foo"], dtype=any_string_dtype + ) result = ser.str.fullmatch(".*BAD[_]+.*BAD") - expected = Series([True, False, np.nan, False]) + expected_dtype = "object" if any_string_dtype == "object" else "boolean" + expected = Series([True, False, np.nan, False], dtype=expected_dtype) tm.assert_series_equal(result, expected) - ser = Series(["ab", "AB", "abc", "ABC"]) + ser = Series(["ab", "AB", "abc", "ABC"], dtype=any_string_dtype) result = ser.str.fullmatch("ab", case=False) - expected = Series([True, True, False, False]) + expected_dtype = np.bool_ if any_string_dtype == "object" 
else "boolean" + expected = Series([True, True, False, False], dtype=expected_dtype) tm.assert_series_equal(result, expected) -def test_fullmatch_nullable_string_dtype(nullable_string_dtype): - ser = Series( - ["fooBAD__barBAD", "BAD_BADleroybrown", None, "foo"], - dtype=nullable_string_dtype, - ) - result = ser.str.fullmatch(".*BAD[_]+.*BAD") - # Result is nullable boolean - expected = Series([True, False, np.nan, False], dtype="boolean") +def test_findall(any_string_dtype): + ser = Series(["fooBAD__barBAD", np.nan, "foo", "BAD"], dtype=any_string_dtype) + result = ser.str.findall("BAD[_]*") + expected = Series([["BAD__", "BAD"], np.nan, [], ["BAD"]]) tm.assert_series_equal(result, expected) -def test_findall(): - values = Series(["fooBAD__barBAD", np.nan, "foo", "BAD"]) - - result = values.str.findall("BAD[_]*") - exp = Series([["BAD__", "BAD"], np.nan, [], ["BAD"]]) - tm.assert_almost_equal(result, exp) - - # mixed - mixed = Series( +def test_findall_mixed_object(): + ser = Series( [ "fooBAD__barBAD", np.nan, @@ -532,8 +523,8 @@ def test_findall(): ] ) - rs = Series(mixed).str.findall("BAD[_]*") - xp = Series( + result = ser.str.findall("BAD[_]*") + expected = Series( [ ["BAD__", "BAD"], np.nan, @@ -547,86 +538,111 @@ def test_findall(): ] ) - assert isinstance(rs, Series) - tm.assert_almost_equal(rs, xp) + tm.assert_series_equal(result, expected) -def test_find(): - values = Series(["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF", "XXXX"]) - result = values.str.find("EF") - tm.assert_series_equal(result, Series([4, 3, 1, 0, -1])) - expected = np.array([v.find("EF") for v in values.values], dtype=np.int64) - tm.assert_numpy_array_equal(result.values, expected) +def test_find(any_string_dtype): + ser = Series( + ["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF", "XXXX"], dtype=any_string_dtype + ) + expected_dtype = np.int64 if any_string_dtype == "object" else "Int64" - result = values.str.rfind("EF") - tm.assert_series_equal(result, Series([4, 5, 7, 4, -1])) - expected 
= np.array([v.rfind("EF") for v in values.values], dtype=np.int64) - tm.assert_numpy_array_equal(result.values, expected) + result = ser.str.find("EF") + expected = Series([4, 3, 1, 0, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + expected = np.array([v.find("EF") for v in np.array(ser)], dtype=np.int64) + tm.assert_numpy_array_equal(np.array(result, dtype=np.int64), expected) - result = values.str.find("EF", 3) - tm.assert_series_equal(result, Series([4, 3, 7, 4, -1])) - expected = np.array([v.find("EF", 3) for v in values.values], dtype=np.int64) - tm.assert_numpy_array_equal(result.values, expected) + result = ser.str.rfind("EF") + expected = Series([4, 5, 7, 4, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + expected = np.array([v.rfind("EF") for v in np.array(ser)], dtype=np.int64) + tm.assert_numpy_array_equal(np.array(result, dtype=np.int64), expected) + + result = ser.str.find("EF", 3) + expected = Series([4, 3, 7, 4, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + expected = np.array([v.find("EF", 3) for v in np.array(ser)], dtype=np.int64) + tm.assert_numpy_array_equal(np.array(result, dtype=np.int64), expected) + + result = ser.str.rfind("EF", 3) + expected = Series([4, 5, 7, 4, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + expected = np.array([v.rfind("EF", 3) for v in np.array(ser)], dtype=np.int64) + tm.assert_numpy_array_equal(np.array(result, dtype=np.int64), expected) - result = values.str.rfind("EF", 3) - tm.assert_series_equal(result, Series([4, 5, 7, 4, -1])) - expected = np.array([v.rfind("EF", 3) for v in values.values], dtype=np.int64) - tm.assert_numpy_array_equal(result.values, expected) + result = ser.str.find("EF", 3, 6) + expected = Series([4, 3, -1, 4, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + expected = np.array([v.find("EF", 3, 6) for v in np.array(ser)], dtype=np.int64) + 
tm.assert_numpy_array_equal(np.array(result, dtype=np.int64), expected) - result = values.str.find("EF", 3, 6) - tm.assert_series_equal(result, Series([4, 3, -1, 4, -1])) - expected = np.array([v.find("EF", 3, 6) for v in values.values], dtype=np.int64) - tm.assert_numpy_array_equal(result.values, expected) + result = ser.str.rfind("EF", 3, 6) + expected = Series([4, 3, -1, 4, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + expected = np.array([v.rfind("EF", 3, 6) for v in np.array(ser)], dtype=np.int64) + tm.assert_numpy_array_equal(np.array(result, dtype=np.int64), expected) - result = values.str.rfind("EF", 3, 6) - tm.assert_series_equal(result, Series([4, 3, -1, 4, -1])) - expected = np.array([v.rfind("EF", 3, 6) for v in values.values], dtype=np.int64) - tm.assert_numpy_array_equal(result.values, expected) +def test_find_bad_arg_raises(any_string_dtype): + ser = Series([], dtype=any_string_dtype) with pytest.raises(TypeError, match="expected a string object, not int"): - result = values.str.find(0) + ser.str.find(0) with pytest.raises(TypeError, match="expected a string object, not int"): - result = values.str.rfind(0) + ser.str.rfind(0) -def test_find_nan(): - values = Series(["ABCDEFG", np.nan, "DEFGHIJEF", np.nan, "XXXX"]) - result = values.str.find("EF") - tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1])) +def test_find_nan(any_string_dtype): + ser = Series( + ["ABCDEFG", np.nan, "DEFGHIJEF", np.nan, "XXXX"], dtype=any_string_dtype + ) + expected_dtype = np.float64 if any_string_dtype == "object" else "Int64" - result = values.str.rfind("EF") - tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1])) + result = ser.str.find("EF") + expected = Series([4, np.nan, 1, np.nan, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) - result = values.str.find("EF", 3) - tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1])) + result = ser.str.rfind("EF") + expected = Series([4, np.nan, 
7, np.nan, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) - result = values.str.rfind("EF", 3) - tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1])) + result = ser.str.find("EF", 3) + expected = Series([4, np.nan, 7, np.nan, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) - result = values.str.find("EF", 3, 6) - tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1])) + result = ser.str.rfind("EF", 3) + expected = Series([4, np.nan, 7, np.nan, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) - result = values.str.rfind("EF", 3, 6) - tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1])) + result = ser.str.find("EF", 3, 6) + expected = Series([4, np.nan, -1, np.nan, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + result = ser.str.rfind("EF", 3, 6) + expected = Series([4, np.nan, -1, np.nan, -1], dtype=expected_dtype) + tm.assert_series_equal(result, expected) -def test_translate(): - def _check(result, expected): - if isinstance(result, Series): - tm.assert_series_equal(result, expected) - else: - tm.assert_index_equal(result, expected) - for klass in [Series, Index]: - s = klass(["abcdefg", "abcc", "cdddfg", "cdefggg"]) - table = str.maketrans("abc", "cde") - result = s.str.translate(table) - expected = klass(["cdedefg", "cdee", "edddfg", "edefggg"]) - _check(result, expected) +def test_translate(index_or_series, any_string_dtype): + obj = index_or_series( + ["abcdefg", "abcc", "cdddfg", "cdefggg"], dtype=any_string_dtype + ) + table = str.maketrans("abc", "cde") + result = obj.str.translate(table) + expected = index_or_series( + ["cdedefg", "cdee", "edddfg", "edefggg"], dtype=any_string_dtype + ) + if index_or_series is Series: + tm.assert_series_equal(result, expected) + else: + tm.assert_index_equal(result, expected) + +def test_translate_mixed_object(): # Series with non-string values s = Series(["a", "b", "c", 1.2]) + table = 
str.maketrans("abc", "cde") expected = Series(["c", "d", "e", np.nan]) result = s.str.translate(table) tm.assert_series_equal(result, expected)
https://api.github.com/repos/pandas-dev/pandas/pulls/41471
2021-05-14T14:52:56Z
2021-05-14T16:05:55Z
2021-05-14T16:05:54Z
2021-05-14T17:10:49Z
[ArrowStringArray] TST: parametrize (part) pandas/tests/strings/test_api.py
diff --git a/pandas/tests/strings/test_api.py b/pandas/tests/strings/test_api.py index 7f864a503486e..ec8b5bfa11ad5 100644 --- a/pandas/tests/strings/test_api.py +++ b/pandas/tests/strings/test_api.py @@ -10,11 +10,11 @@ from pandas.core import strings as strings -def test_api(): +def test_api(any_string_dtype): # GH 6106, GH 9322 assert Series.str is strings.StringMethods - assert isinstance(Series([""]).str, strings.StringMethods) + assert isinstance(Series([""], dtype=any_string_dtype).str, strings.StringMethods) def test_api_mi_raises(): @@ -74,18 +74,26 @@ def test_api_per_method( reason = None if box is Index and values.size == 0: if method_name in ["partition", "rpartition"] and kwargs.get("expand", True): + raises = TypeError reason = "Method cannot deal with empty Index" elif method_name == "split" and kwargs.get("expand", None): + raises = TypeError reason = "Split fails on empty Series when expand=True" elif method_name == "get_dummies": + raises = ValueError reason = "Need to fortify get_dummies corner cases" - elif box is Index and inferred_dtype == "empty" and dtype == object: - if method_name == "get_dummies": - reason = "Need to fortify get_dummies corner cases" + elif ( + box is Index + and inferred_dtype == "empty" + and dtype == object + and method_name == "get_dummies" + ): + raises = ValueError + reason = "Need to fortify get_dummies corner cases" if reason is not None: - mark = pytest.mark.xfail(reason=reason) + mark = pytest.mark.xfail(raises=raises, reason=reason) request.node.add_marker(mark) t = box(values, dtype=dtype) # explicit dtype to avoid casting @@ -117,9 +125,15 @@ def test_api_per_method( method(*args, **kwargs) -def test_api_for_categorical(any_string_method): +def test_api_for_categorical(any_string_method, any_string_dtype, request): # https://github.com/pandas-dev/pandas/issues/10661 - s = Series(list("aabb")) + + if any_string_dtype == "arrow_string": + # unsupported operand type(s) for +: 'ArrowStringArray' and 'str' + mark 
= pytest.mark.xfail(raises=TypeError, reason="Not Implemented") + request.node.add_marker(mark) + + s = Series(list("aabb"), dtype=any_string_dtype) s = s + " " + s c = s.astype("category") assert isinstance(c.str, strings.StringMethods) @@ -127,7 +141,7 @@ def test_api_for_categorical(any_string_method): method_name, args, kwargs = any_string_method result = getattr(c.str, method_name)(*args, **kwargs) - expected = getattr(s.str, method_name)(*args, **kwargs) + expected = getattr(s.astype("object").str, method_name)(*args, **kwargs) if isinstance(result, DataFrame): tm.assert_frame_equal(result, expected)
https://api.github.com/repos/pandas-dev/pandas/pulls/41470
2021-05-14T13:13:19Z
2021-05-14T16:07:34Z
2021-05-14T16:07:34Z
2021-05-14T17:04:52Z
TST/CLN: tighten some xfails
diff --git a/doc/source/development/code_style.rst b/doc/source/development/code_style.rst index 8f399ef6f1192..77c8d56765e5e 100644 --- a/doc/source/development/code_style.rst +++ b/doc/source/development/code_style.rst @@ -57,7 +57,8 @@ xfail during the testing phase. To do so, use the ``request`` fixture: import pytest def test_xfail(request): - request.node.add_marker(pytest.mark.xfail(reason="Indicate why here")) + mark = pytest.mark.xfail(raises=TypeError, reason="Indicate why here") + request.node.add_marker(mark) xfail is not to be used for tests involving failure due to invalid user arguments. For these tests, we need to verify the correct exception type and error message diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py index cee8a0218e9e8..a724f3d9f2a7d 100644 --- a/pandas/tests/apply/test_frame_apply.py +++ b/pandas/tests/apply/test_frame_apply.py @@ -164,8 +164,9 @@ def test_apply_with_string_funcs(request, float_frame, func, args, kwds, how): if len(args) > 1 and how == "agg": request.node.add_marker( pytest.mark.xfail( + raises=TypeError, reason="agg/apply signature mismatch - agg passes 2nd " - "argument to func" + "argument to func", ) ) result = getattr(float_frame, how)(func, *args, **kwds) diff --git a/pandas/tests/apply/test_frame_transform.py b/pandas/tests/apply/test_frame_transform.py index 18c36e4096b2b..9050fab702881 100644 --- a/pandas/tests/apply/test_frame_transform.py +++ b/pandas/tests/apply/test_frame_transform.py @@ -173,7 +173,9 @@ def test_transform_bad_dtype(op, frame_or_series, request): # GH 35964 if op == "rank": request.node.add_marker( - pytest.mark.xfail(reason="GH 40418: rank does not raise a TypeError") + pytest.mark.xfail( + raises=ValueError, reason="GH 40418: rank does not raise a TypeError" + ) ) obj = DataFrame({"A": 3 * [object]}) # DataFrame that will fail on most transforms diff --git a/pandas/tests/apply/test_series_apply.py b/pandas/tests/apply/test_series_apply.py 
index bfa88f54e4f10..88c3ad228f8c3 100644 --- a/pandas/tests/apply/test_series_apply.py +++ b/pandas/tests/apply/test_series_apply.py @@ -262,7 +262,9 @@ def test_transform_partial_failure(op, request): # GH 35964 if op in ("ffill", "bfill", "pad", "backfill", "shift"): request.node.add_marker( - pytest.mark.xfail(reason=f"{op} is successful on any dtype") + pytest.mark.xfail( + raises=AssertionError, reason=f"{op} is successful on any dtype" + ) ) if op in ("rank", "fillna"): pytest.skip(f"{op} doesn't raise TypeError on object") diff --git a/pandas/tests/arithmetic/test_interval.py b/pandas/tests/arithmetic/test_interval.py index f1815b3e05367..1bbe90f3cb58c 100644 --- a/pandas/tests/arithmetic/test_interval.py +++ b/pandas/tests/arithmetic/test_interval.py @@ -135,7 +135,8 @@ def test_compare_scalar_na(self, op, interval_array, nulls_fixture, request): if nulls_fixture is pd.NA and interval_array.dtype.subtype != "int64": mark = pytest.mark.xfail( - reason="broken for non-integer IntervalArray; see GH 31882" + raises=AssertionError, + reason="broken for non-integer IntervalArray; see GH 31882", ) request.node.add_marker(mark) @@ -220,7 +221,7 @@ def test_compare_list_like_nan(self, op, interval_array, nulls_fixture, request) if nulls_fixture is pd.NA and interval_array.dtype.subtype != "i8": reason = "broken for non-integer IntervalArray; see GH 31882" - mark = pytest.mark.xfail(reason=reason) + mark = pytest.mark.xfail(raises=AssertionError, reason=reason) request.node.add_marker(mark) tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/arrays/masked/test_arithmetic.py b/pandas/tests/arrays/masked/test_arithmetic.py index adb52fce17f8b..29998831777f8 100644 --- a/pandas/tests/arrays/masked/test_arithmetic.py +++ b/pandas/tests/arrays/masked/test_arithmetic.py @@ -170,7 +170,9 @@ def test_unary_op_does_not_propagate_mask(data, op, request): data, _ = data if data.dtype in ["Float32", "Float64"] and op == "__invert__": request.node.add_marker( 
- pytest.mark.xfail(reason="invert is not implemented for float ea dtypes") + pytest.mark.xfail( + raises=TypeError, reason="invert is not implemented for float ea dtypes" + ) ) s = pd.Series(data) result = getattr(s, op)() diff --git a/pandas/tests/arrays/masked/test_arrow_compat.py b/pandas/tests/arrays/masked/test_arrow_compat.py index e06b8749fbf11..193017ddfcadf 100644 --- a/pandas/tests/arrays/masked/test_arrow_compat.py +++ b/pandas/tests/arrays/masked/test_arrow_compat.py @@ -173,7 +173,7 @@ def test_from_arrow_type_error(request, data): # TODO numeric dtypes cast any incoming array to the correct dtype # instead of erroring request.node.add_marker( - pytest.mark.xfail(reason="numeric dtypes don't error but cast") + pytest.mark.xfail(raises=None, reason="numeric dtypes don't error but cast") ) arr = pa.array(data).cast("string") diff --git a/pandas/tests/arrays/sparse/test_arithmetics.py b/pandas/tests/arrays/sparse/test_arithmetics.py index 79cf8298ab1a6..2ae60a90fee60 100644 --- a/pandas/tests/arrays/sparse/test_arithmetics.py +++ b/pandas/tests/arrays/sparse/test_arithmetics.py @@ -132,7 +132,7 @@ def test_float_scalar( elif op is ops.rfloordiv and scalar == 0: pass else: - mark = pytest.mark.xfail(reason="GH#38172") + mark = pytest.mark.xfail(raises=AssertionError, reason="GH#38172") request.node.add_marker(mark) values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) @@ -177,11 +177,13 @@ def test_float_same_index_with_nans( # when sp_index are the same op = all_arithmetic_functions - if not np_version_under1p20: - if op is ops.rfloordiv: - if not (mix and kind == "block"): - mark = pytest.mark.xfail(reason="GH#38172") - request.node.add_marker(mark) + if ( + not np_version_under1p20 + and op is ops.rfloordiv + and not (mix and kind == "block") + ): + mark = pytest.mark.xfail(raises=AssertionError, reason="GH#38172") + request.node.add_marker(mark) values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) rvalues = 
self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan]) @@ -358,10 +360,13 @@ def test_bool_array_logical(self, kind, fill_value): def test_mixed_array_float_int(self, kind, mix, all_arithmetic_functions, request): op = all_arithmetic_functions - if not np_version_under1p20: - if op in [operator.floordiv, ops.rfloordiv] and mix: - mark = pytest.mark.xfail(reason="GH#38172") - request.node.add_marker(mark) + if ( + not np_version_under1p20 + and op in [operator.floordiv, ops.rfloordiv] + and mix + ): + mark = pytest.mark.xfail(raises=AssertionError, reason="GH#38172") + request.node.add_marker(mark) rdtype = "int64" diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index 17d05ebeb0fc5..3205664e7c80a 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -115,10 +115,10 @@ def test_astype_roundtrip(dtype, request): def test_add(dtype, request): if dtype == "arrow_string": reason = ( - "TypeError: unsupported operand type(s) for +: 'ArrowStringArray' and " + "unsupported operand type(s) for +: 'ArrowStringArray' and " "'ArrowStringArray'" ) - mark = pytest.mark.xfail(reason=reason) + mark = pytest.mark.xfail(raises=TypeError, reason=reason) request.node.add_marker(mark) a = pd.Series(["a", "b", "c", None, None], dtype=dtype) @@ -143,7 +143,7 @@ def test_add(dtype, request): def test_add_2d(dtype, request): if dtype == "arrow_string": reason = "Failed: DID NOT RAISE <class 'ValueError'>" - mark = pytest.mark.xfail(reason=reason) + mark = pytest.mark.xfail(raises=None, reason=reason) request.node.add_marker(mark) a = pd.array(["a", "b", "c"], dtype=dtype) @@ -158,11 +158,8 @@ def test_add_2d(dtype, request): def test_add_sequence(dtype, request): if dtype == "arrow_string": - reason = ( - "TypeError: unsupported operand type(s) for +: 'ArrowStringArray' " - "and 'list'" - ) - mark = pytest.mark.xfail(reason=reason) + reason = "unsupported operand type(s) for +: 
'ArrowStringArray' and 'list'" + mark = pytest.mark.xfail(raises=TypeError, reason=reason) request.node.add_marker(mark) a = pd.array(["a", "b", None, None], dtype=dtype) @@ -179,10 +176,8 @@ def test_add_sequence(dtype, request): def test_mul(dtype, request): if dtype == "arrow_string": - reason = ( - "TypeError: unsupported operand type(s) for *: 'ArrowStringArray' and 'int'" - ) - mark = pytest.mark.xfail(reason=reason) + reason = "unsupported operand type(s) for *: 'ArrowStringArray' and 'int'" + mark = pytest.mark.xfail(raises=TypeError, reason=reason) request.node.add_marker(mark) a = pd.array(["a", "b", None], dtype=dtype) @@ -246,7 +241,7 @@ def test_comparison_methods_scalar_pd_na(all_compare_operators, dtype): def test_comparison_methods_scalar_not_string(all_compare_operators, dtype, request): if all_compare_operators not in ["__eq__", "__ne__"]: reason = "comparison op not supported between instances of 'str' and 'int'" - mark = pytest.mark.xfail(reason=reason) + mark = pytest.mark.xfail(raises=TypeError, reason=reason) request.node.add_marker(mark) op_name = all_compare_operators @@ -262,11 +257,9 @@ def test_comparison_methods_scalar_not_string(all_compare_operators, dtype, requ def test_comparison_methods_array(all_compare_operators, dtype, request): if dtype == "arrow_string": - if all_compare_operators in ["__eq__", "__ne__"]: - reason = "NotImplementedError: Neither scalar nor ArrowStringArray" - else: - reason = "AssertionError: left is not an ExtensionArray" - mark = pytest.mark.xfail(reason=reason) + mark = pytest.mark.xfail( + raises=AssertionError, reason="left is not an ExtensionArray" + ) request.node.add_marker(mark) op_name = all_compare_operators @@ -309,8 +302,9 @@ def test_constructor_raises(cls): @pytest.mark.parametrize("copy", [True, False]) def test_from_sequence_no_mutate(copy, cls, request): if cls is ArrowStringArray and copy is False: - reason = "AssertionError: numpy array are different" - mark = 
pytest.mark.xfail(reason=reason) + mark = pytest.mark.xfail( + raises=AssertionError, reason="numpy array are different" + ) request.node.add_marker(mark) nan_arr = np.array(["a", np.nan], dtype=object) @@ -333,8 +327,8 @@ def test_from_sequence_no_mutate(copy, cls, request): def test_astype_int(dtype, request): if dtype == "arrow_string": - reason = "TypeError: Cannot interpret 'Int64Dtype()' as a data type" - mark = pytest.mark.xfail(reason=reason) + reason = "Cannot interpret 'Int64Dtype()' as a data type" + mark = pytest.mark.xfail(raises=TypeError, reason=reason) request.node.add_marker(mark) arr = pd.array(["1", pd.NA, "3"], dtype=dtype) @@ -349,12 +343,10 @@ def test_astype_float(dtype, any_float_allowed_nullable_dtype, request): if dtype == "arrow_string": if any_float_allowed_nullable_dtype in {"Float32", "Float64"}: - reason = "TypeError: Cannot interpret 'Float32Dtype()' as a data type" + reason = "Cannot interpret 'Float32Dtype()' as a data type" else: - reason = ( - "TypeError: float() argument must be a string or a number, not 'NAType'" - ) - mark = pytest.mark.xfail(reason=reason) + reason = "float() argument must be a string or a number, not 'NAType'" + mark = pytest.mark.xfail(raises=TypeError, reason=reason) request.node.add_marker(mark) ser = pd.Series(["1.1", pd.NA, "3.3"], dtype=dtype) @@ -376,8 +368,8 @@ def test_reduce(skipna, dtype): @pytest.mark.parametrize("skipna", [True, False]) def test_min_max(method, skipna, dtype, request): if dtype == "arrow_string": - reason = "AttributeError: 'ArrowStringArray' object has no attribute 'max'" - mark = pytest.mark.xfail(reason=reason) + reason = "'ArrowStringArray' object has no attribute 'max'" + mark = pytest.mark.xfail(raises=AttributeError, reason=reason) request.node.add_marker(mark) arr = pd.Series(["a", "b", "c", None], dtype=dtype) @@ -394,13 +386,12 @@ def test_min_max(method, skipna, dtype, request): def test_min_max_numpy(method, box, dtype, request): if dtype == "arrow_string": if box is 
pd.array: - reason = ( - "TypeError: '<=' not supported between instances of 'str' and " - "'NoneType'" - ) + raises = TypeError + reason = "'<=' not supported between instances of 'str' and 'NoneType'" else: - reason = "AttributeError: 'ArrowStringArray' object has no attribute 'max'" - mark = pytest.mark.xfail(reason=reason) + raises = AttributeError + reason = "'ArrowStringArray' object has no attribute 'max'" + mark = pytest.mark.xfail(raises=raises, reason=reason) request.node.add_marker(mark) arr = box(["a", "b", "c", None], dtype=dtype) @@ -425,10 +416,10 @@ def test_fillna_args(dtype, request): if dtype == "arrow_string": reason = ( - "AssertionError: Regex pattern \"Cannot set non-string value '1' into " + "Regex pattern \"Cannot set non-string value '1' into " "a StringArray.\" does not match 'Scalar must be NA or str'" ) - mark = pytest.mark.xfail(reason=reason) + mark = pytest.mark.xfail(raises=AssertionError, reason=reason) request.node.add_marker(mark) arr = pd.array(["a", pd.NA], dtype=dtype) diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 771d60b000a7d..8581e9a20526f 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -306,7 +306,9 @@ def test_searchsorted_castable_strings(self, arr1d, box, request): # If we have e.g. tzutc(), when we cast to string and parse # back we get pytz.UTC, and then consider them different timezones # so incorrectly raise. - mark = pytest.mark.xfail(reason="timezone comparisons inconsistent") + mark = pytest.mark.xfail( + raises=TypeError, reason="timezone comparisons inconsistent" + ) request.node.add_marker(mark) arr = arr1d @@ -471,7 +473,9 @@ def test_setitem_strs(self, arr1d, request): # If we have e.g. tzutc(), when we cast to string and parse # back we get pytz.UTC, and then consider them different timezones # so incorrectly raise. 
- mark = pytest.mark.xfail(reason="timezone comparisons inconsistent") + mark = pytest.mark.xfail( + raises=TypeError, reason="timezone comparisons inconsistent" + ) request.node.add_marker(mark) # Setting list-like of strs
https://api.github.com/repos/pandas-dev/pandas/pulls/41469
2021-05-14T12:53:14Z
2021-05-14T16:07:01Z
2021-05-14T16:07:01Z
2021-05-14T17:07:25Z
DOC: freeze old whatsnew notes part 1 #6856
diff --git a/doc/source/whatsnew/v0.5.0.rst b/doc/source/whatsnew/v0.5.0.rst index 7447a10fa1d6b..8757d9c887785 100644 --- a/doc/source/whatsnew/v0.5.0.rst +++ b/doc/source/whatsnew/v0.5.0.rst @@ -6,12 +6,6 @@ Version 0.5.0 (October 24, 2011) {{ header }} -.. ipython:: python - :suppress: - - from pandas import * # noqa F401, F403 - - New features ~~~~~~~~~~~~ diff --git a/doc/source/whatsnew/v0.6.0.rst b/doc/source/whatsnew/v0.6.0.rst index 253ca4d4188e5..19e2e85c09a87 100644 --- a/doc/source/whatsnew/v0.6.0.rst +++ b/doc/source/whatsnew/v0.6.0.rst @@ -5,12 +5,6 @@ Version 0.6.0 (November 25, 2011) {{ header }} -.. ipython:: python - :suppress: - - from pandas import * # noqa F401, F403 - - New features ~~~~~~~~~~~~ - :ref:`Added <reshaping.melt>` ``melt`` function to ``pandas.core.reshape`` diff --git a/doc/source/whatsnew/v0.7.0.rst b/doc/source/whatsnew/v0.7.0.rst index 2fe686d8858a2..52747f2992dc4 100644 --- a/doc/source/whatsnew/v0.7.0.rst +++ b/doc/source/whatsnew/v0.7.0.rst @@ -31,10 +31,22 @@ New features - Handle differently-indexed output values in ``DataFrame.apply`` (:issue:`498`) -.. ipython:: python +.. 
code-block:: ipython - df = pd.DataFrame(np.random.randn(10, 4)) - df.apply(lambda x: x.describe()) + In [1]: df = pd.DataFrame(np.random.randn(10, 4)) + In [2]: df.apply(lambda x: x.describe()) + Out[2]: + 0 1 2 3 + count 10.000000 10.000000 10.000000 10.000000 + mean 0.190912 -0.395125 -0.731920 -0.403130 + std 0.730951 0.813266 1.112016 0.961912 + min -0.861849 -2.104569 -1.776904 -1.469388 + 25% -0.411391 -0.698728 -1.501401 -1.076610 + 50% 0.380863 -0.228039 -1.191943 -1.004091 + 75% 0.658444 0.057974 -0.034326 0.461706 + max 1.212112 0.577046 1.643563 1.071804 + + [8 rows x 4 columns] - :ref:`Add<advanced.reorderlevels>` ``reorder_levels`` method to Series and DataFrame (:issue:`534`) @@ -116,13 +128,31 @@ One of the potentially riskiest API changes in 0.7.0, but also one of the most important, was a complete review of how **integer indexes** are handled with regard to label-based indexing. Here is an example: -.. ipython:: python +.. code-block:: ipython - s = pd.Series(np.random.randn(10), index=range(0, 20, 2)) - s - s[0] - s[2] - s[4] + In [3]: s = pd.Series(np.random.randn(10), index=range(0, 20, 2)) + In [4]: s + Out[4]: + 0 -1.294524 + 2 0.413738 + 4 0.276662 + 6 -0.472035 + 8 -0.013960 + 10 -0.362543 + 12 -0.006154 + 14 -0.923061 + 16 0.895717 + 18 0.805244 + Length: 10, dtype: float64 + + In [5]: s[0] + Out[5]: -1.2945235902555294 + + In [6]: s[2] + Out[6]: 0.41373810535784006 + + In [7]: s[4] + Out[7]: 0.2766617129497566 This is all exactly identical to the behavior before. However, if you ask for a key **not** contained in the Series, in versions 0.6.1 and prior, Series would @@ -235,22 +265,65 @@ slice to a Series when getting and setting values via ``[]`` (i.e. the ``__getitem__`` and ``__setitem__`` methods). The behavior will be the same as passing similar input to ``ix`` **except in the case of integer indexing**: -.. ipython:: python +.. 
code-block:: ipython - s = pd.Series(np.random.randn(6), index=list('acegkm')) - s - s[['m', 'a', 'c', 'e']] - s['b':'l'] - s['c':'k'] + In [8]: s = pd.Series(np.random.randn(6), index=list('acegkm')) + + In [9]: s + Out[9]: + a -1.206412 + c 2.565646 + e 1.431256 + g 1.340309 + k -1.170299 + m -0.226169 + Length: 6, dtype: float64 + + In [10]: s[['m', 'a', 'c', 'e']] + Out[10]: + m -0.226169 + a -1.206412 + c 2.565646 + e 1.431256 + Length: 4, dtype: float64 + + In [11]: s['b':'l'] + Out[11]: + c 2.565646 + e 1.431256 + g 1.340309 + k -1.170299 + Length: 4, dtype: float64 + + In [12]: s['c':'k'] + Out[12]: + c 2.565646 + e 1.431256 + g 1.340309 + k -1.170299 + Length: 4, dtype: float64 In the case of integer indexes, the behavior will be exactly as before (shadowing ``ndarray``): -.. ipython:: python +.. code-block:: ipython - s = pd.Series(np.random.randn(6), index=range(0, 12, 2)) - s[[4, 0, 2]] - s[1:5] + In [13]: s = pd.Series(np.random.randn(6), index=range(0, 12, 2)) + + In [14]: s[[4, 0, 2]] + Out[14]: + 4 0.132003 + 0 0.410835 + 2 0.813850 + Length: 3, dtype: float64 + + In [15]: s[1:5] + Out[15]: + 2 0.813850 + 4 0.132003 + 6 -0.827317 + 8 -0.076467 + Length: 4, dtype: float64 If you wish to do indexing with sequences and slicing on an integer index with label semantics, use ``ix``. diff --git a/doc/source/whatsnew/v0.7.3.rst b/doc/source/whatsnew/v0.7.3.rst index 4ca31baf560bb..5da6bef0c4f03 100644 --- a/doc/source/whatsnew/v0.7.3.rst +++ b/doc/source/whatsnew/v0.7.3.rst @@ -51,21 +51,37 @@ NA boolean comparison API change Reverted some changes to how NA values (represented typically as ``NaN`` or ``None``) are handled in non-numeric Series: -.. ipython:: python +.. 
code-block:: ipython - series = pd.Series(["Steve", np.nan, "Joe"]) - series == "Steve" - series != "Steve" + In [1]: series = pd.Series(["Steve", np.nan, "Joe"]) + + In [2]: series == "Steve" + Out[2]: + 0 True + 1 False + 2 False + Length: 3, dtype: bool + + In [3]: series != "Steve" + Out[3]: + 0 False + 1 True + 2 True + Length: 3, dtype: bool In comparisons, NA / NaN will always come through as ``False`` except with ``!=`` which is ``True``. *Be very careful* with boolean arithmetic, especially negation, in the presence of NA data. You may wish to add an explicit NA filter into boolean array operations if you are worried about this: -.. ipython:: python +.. code-block:: ipython + + In [4]: mask = series == "Steve" - mask = series == "Steve" - series[mask & series.notnull()] + In [5]: series[mask & series.notnull()] + Out[5]: + 0 Steve + Length: 1, dtype: object While propagating NA in comparisons may seem like the right behavior to some users (and you could argue on purely technical grounds that this is the right @@ -80,21 +96,51 @@ Other API changes When calling ``apply`` on a grouped Series, the return value will also be a Series, to be more consistent with the ``groupby`` behavior with DataFrame: -.. ipython:: python - :okwarning: - - df = pd.DataFrame( - { - "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], - "B": ["one", "one", "two", "three", "two", "two", "one", "three"], - "C": np.random.randn(8), - "D": np.random.randn(8), - } - ) - df - grouped = df.groupby("A")["C"] - grouped.describe() - grouped.apply(lambda x: x.sort_values()[-2:]) # top 2 values +.. 
code-block:: ipython + + In [6]: df = pd.DataFrame( + ...: { + ...: "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + ...: "B": ["one", "one", "two", "three", "two", "two", "one", "three"], + ...: "C": np.random.randn(8), + ...: "D": np.random.randn(8), + ...: } + ...: ) + ...: + + In [7]: df + Out[7]: + A B C D + 0 foo one 0.469112 -0.861849 + 1 bar one -0.282863 -2.104569 + 2 foo two -1.509059 -0.494929 + 3 bar three -1.135632 1.071804 + 4 foo two 1.212112 0.721555 + 5 bar two -0.173215 -0.706771 + 6 foo one 0.119209 -1.039575 + 7 foo three -1.044236 0.271860 + + [8 rows x 4 columns] + + In [8]: grouped = df.groupby("A")["C"] + + In [9]: grouped.describe() + Out[9]: + count mean std min 25% 50% 75% max + A + bar 3.0 -0.530570 0.526860 -1.135632 -0.709248 -0.282863 -0.228039 -0.173215 + foo 5.0 -0.150572 1.113308 -1.509059 -1.044236 0.119209 0.469112 1.212112 + + [2 rows x 8 columns] + + In [10]: grouped.apply(lambda x: x.sort_values()[-2:]) # top 2 values + Out[10]: + A + bar 1 -0.282863 + 5 -0.173215 + foo 0 0.469112 + 4 1.212112 + Name: C, Length: 4, dtype: float64 .. _whatsnew_0.7.3.contributors:
xref #6856
https://api.github.com/repos/pandas-dev/pandas/pulls/41464
2021-05-14T03:02:11Z
2021-05-14T16:08:45Z
2021-05-14T16:08:45Z
2022-11-18T02:21:36Z
Fix deprecation warnings for empty series in docstrings
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 0d39f13afc426..a09cc0a6324c0 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -11359,7 +11359,7 @@ def _doc_params(cls): True >>> pd.Series([True, False]).all() False ->>> pd.Series([]).all() +>>> pd.Series([], dtype="float64").all() True >>> pd.Series([np.nan]).all() True @@ -11727,7 +11727,7 @@ def _doc_params(cls): False >>> pd.Series([True, False]).any() True ->>> pd.Series([]).any() +>>> pd.Series([], dtype="float64").any() False >>> pd.Series([np.nan]).any() False @@ -11815,13 +11815,13 @@ def _doc_params(cls): By default, the sum of an empty or all-NA Series is ``0``. ->>> pd.Series([]).sum() # min_count=0 is the default +>>> pd.Series([], dtype="float64").sum() # min_count=0 is the default 0.0 This can be controlled with the ``min_count`` parameter. For example, if you'd like the sum of an empty series to be NaN, pass ``min_count=1``. ->>> pd.Series([]).sum(min_count=1) +>>> pd.Series([], dtype="float64").sum(min_count=1) nan Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and @@ -11862,12 +11862,12 @@ def _doc_params(cls): -------- By default, the product of an empty or all-NA Series is ``1`` ->>> pd.Series([]).prod() +>>> pd.Series([], dtype="float64").prod() 1.0 This can be controlled with the ``min_count`` parameter ->>> pd.Series([]).prod(min_count=1) +>>> pd.Series([], dtype="float64").prod(min_count=1) nan Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
Fixing the deprecation warnings
https://api.github.com/repos/pandas-dev/pandas/pulls/41463
2021-05-13T22:35:42Z
2021-05-13T23:29:13Z
2021-05-13T23:29:13Z
2021-05-14T20:15:33Z
CI: Fix changed flake8 error message after upgrade
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2f46190ef5eb7..db3fc1853ea71 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -35,7 +35,7 @@ repos: exclude: ^pandas/_libs/src/(klib|headers)/ args: [--quiet, '--extensions=c,h', '--headers=h', --recursive, '--filter=-readability/casting,-runtime/int,-build/include_subdir'] - repo: https://gitlab.com/pycqa/flake8 - rev: 3.9.1 + rev: 3.9.2 hooks: - id: flake8 additional_dependencies: @@ -75,7 +75,7 @@ repos: hooks: - id: yesqa additional_dependencies: - - flake8==3.9.1 + - flake8==3.9.2 - flake8-comprehensions==3.1.0 - flake8-bugbear==21.3.2 - pandas-dev-flaker==0.2.0 diff --git a/environment.yml b/environment.yml index 338331b54c824..1368c402d9c68 100644 --- a/environment.yml +++ b/environment.yml @@ -20,7 +20,7 @@ dependencies: # code checks - black=20.8b1 - cpplint - - flake8=3.9.1 + - flake8=3.9.2 - flake8-bugbear=21.3.2 # used by flake8, find likely bugs - flake8-comprehensions=3.1.0 # used by flake8, linting of unnecessary comprehensions - isort>=5.2.1 # check that imports are in the right order diff --git a/requirements-dev.txt b/requirements-dev.txt index 3c1b91220c3fe..2c109c2d3aac0 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -8,7 +8,7 @@ asv cython>=0.29.21 black==20.8b1 cpplint -flake8==3.9.1 +flake8==3.9.2 flake8-bugbear==21.3.2 flake8-comprehensions==3.1.0 isort>=5.2.1 diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py index 7e4c68ddc183b..cbf3e84044d53 100644 --- a/scripts/tests/test_validate_docstrings.py +++ b/scripts/tests/test_validate_docstrings.py @@ -165,7 +165,7 @@ def test_bad_class(self, capsys): "indentation_is_not_a_multiple_of_four", # with flake8 3.9.0, the message ends with four spaces, # whereas in earlier versions, it ended with "four" - ("flake8 error: E111 indentation is not a multiple of ",), + ("flake8 error: E111 indentation is not a multiple of 4",), ), ( "BadDocstrings",
Upgrade caused ci on 1.2.x to fail. Should backport probably
https://api.github.com/repos/pandas-dev/pandas/pulls/41462
2021-05-13T22:25:55Z
2021-05-14T00:27:45Z
2021-05-14T00:27:45Z
2021-05-14T22:38:03Z
REF: remove _wrap_frame_output
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 71d19cdd877a6..5c28a15532174 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1101,22 +1101,28 @@ def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame: if self.grouper.nkeys != 1: raise AssertionError("Number of keys must be 1") - axis = self.axis obj = self._obj_with_exclusions result: dict[Hashable, NDFrame | np.ndarray] = {} - if axis != obj._info_axis_number: + if self.axis == 0: # test_pass_args_kwargs_duplicate_columns gets here with non-unique columns for name, data in self: fres = func(data, *args, **kwargs) result[name] = fres else: + # we get here in a number of test_multilevel tests for name in self.indices: data = self.get_group(name, obj=obj) fres = func(data, *args, **kwargs) result[name] = fres - return self._wrap_frame_output(result, obj) + result_index = self.grouper.result_index + other_ax = obj.axes[1 - self.axis] + out = self.obj._constructor(result, index=other_ax, columns=result_index) + if self.axis == 0: + out = out.T + + return out def _aggregate_item_by_item(self, func, *args, **kwargs) -> DataFrame: # only for axis==0 @@ -1568,16 +1574,6 @@ def _gotitem(self, key, ndim: int, subset=None): raise AssertionError("invalid ndim for _gotitem") - def _wrap_frame_output(self, result: dict, obj: DataFrame) -> DataFrame: - result_index = self.grouper.levels[0] - - if self.axis == 0: - return self.obj._constructor( - result, index=obj.columns, columns=result_index - ).T - else: - return self.obj._constructor(result, index=obj.index, columns=result_index) - def _get_data_to_aggregate(self) -> Manager2D: obj = self._obj_with_exclusions if self.axis == 1:
There's too many _wrap_foo_output methods and this one is only used in one place
https://api.github.com/repos/pandas-dev/pandas/pulls/41461
2021-05-13T22:02:52Z
2021-05-13T23:23:22Z
2021-05-13T23:23:22Z
2021-05-13T23:29:41Z
Backport PR #41452 on branch 1.2.x (CI: Pin jinja2 to version lower than 3.0)
diff --git a/environment.yml b/environment.yml index 61c8351070de9..15c1611169427 100644 --- a/environment.yml +++ b/environment.yml @@ -77,7 +77,7 @@ dependencies: - bottleneck>=1.2.1 - ipykernel - ipython>=7.11.1 - - jinja2 # pandas.Styler + - jinja2<3.0.0 # pandas.Styler - matplotlib>=2.2.2 # pandas.plotting, Series.plot, DataFrame.plot - numexpr>=2.6.8 - scipy>=1.2 diff --git a/requirements-dev.txt b/requirements-dev.txt index 595b2ee537e63..f026fd421f937 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -49,7 +49,7 @@ blosc bottleneck>=1.2.1 ipykernel ipython>=7.11.1 -jinja2 +jinja2<3.0.0 matplotlib>=2.2.2 numexpr>=2.6.8 scipy>=1.2
Backport PR #41452: CI: Pin jinja2 to version lower than 3.0
https://api.github.com/repos/pandas-dev/pandas/pulls/41460
2021-05-13T20:49:45Z
2021-05-13T22:17:36Z
2021-05-13T22:17:36Z
2021-05-24T10:13:12Z
DEPR: DatetimeIndex.union with mixed timezones
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 6ceae4dfd8a91..427b16cc02953 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -645,6 +645,7 @@ Deprecations - The ``inplace`` parameter of :meth:`Categorical.remove_categories`, :meth:`Categorical.add_categories`, :meth:`Categorical.reorder_categories`, :meth:`Categorical.rename_categories`, :meth:`Categorical.set_categories` is deprecated and will be removed in a future version (:issue:`37643`) - Deprecated :func:`merge` producing duplicated columns through the ``suffixes`` keyword and already existing columns (:issue:`22818`) - Deprecated setting :attr:`Categorical._codes`, create a new :class:`Categorical` with the desired codes instead (:issue:`40606`) +- Deprecated behavior of :meth:`DatetimeIndex.union` with mixed timezones; in a future version both will be cast to UTC instead of object dtype (:issue:`39328`) - Deprecated using ``usecols`` with out of bounds indices for ``read_csv`` with ``engine="c"`` (:issue:`25623`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 83946491f32a8..a366b49ce3c55 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2930,6 +2930,21 @@ def union(self, other, sort=None): "Can only union MultiIndex with MultiIndex or Index of tuples, " "try mi.to_flat_index().union(other) instead." ) + if ( + isinstance(self, ABCDatetimeIndex) + and isinstance(other, ABCDatetimeIndex) + and self.tz is not None + and other.tz is not None + ): + # GH#39328 + warnings.warn( + "In a future version, the union of DatetimeIndex objects " + "with mismatched timezones will cast both to UTC instead of " + "object dtype. 
To retain the old behavior, " + "use `index.astype(object).union(other)`", + FutureWarning, + stacklevel=2, + ) dtype = find_common_type([self.dtype, other.dtype]) if self._is_numeric_dtype and other._is_numeric_dtype: diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py index bc01d44de0529..8bba11786e3e5 100644 --- a/pandas/tests/indexes/datetimes/test_timezones.py +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -1146,7 +1146,10 @@ def test_dti_union_aware(self): rng2 = date_range("2012-11-15 12:00:00", periods=6, freq="H", tz="US/Eastern") - result = rng.union(rng2) + with tm.assert_produces_warning(FutureWarning): + # # GH#39328 will cast both to UTC + result = rng.union(rng2) + expected = rng.astype("O").union(rng2.astype("O")) tm.assert_index_equal(result, expected) assert result[0].tz.zone == "US/Central"
- [x] closes #39328 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41458
2021-05-13T19:59:06Z
2021-05-14T16:09:46Z
2021-05-14T16:09:46Z
2021-05-14T16:26:06Z
DOC: unpin numpydoc, fix validation script #39688
diff --git a/environment.yml b/environment.yml index 9396210da3635..aa5e9505f9901 100644 --- a/environment.yml +++ b/environment.yml @@ -115,7 +115,7 @@ dependencies: - natsort # DataFrame.sort_values - pip: - git+https://github.com/pydata/pydata-sphinx-theme.git@master - - numpydoc < 1.2 # 2021-02-09 1.2dev breaking CI + - git+https://github.com/numpy/numpydoc.git - pandas-dev-flaker==0.2.0 - types-python-dateutil - types-PyMySQL diff --git a/requirements-dev.txt b/requirements-dev.txt index 3bf9084f55419..6fcba7c347608 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -78,7 +78,7 @@ pyreadstat tabulate>=0.8.3 natsort git+https://github.com/pydata/pydata-sphinx-theme.git@master -numpydoc < 1.2 +git+https://github.com/numpy/numpydoc.git pandas-dev-flaker==0.2.0 types-python-dateutil types-PyMySQL diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py index 46cfae8e31208..224669937d2df 100644 --- a/scripts/tests/test_validate_docstrings.py +++ b/scripts/tests/test_validate_docstrings.py @@ -305,8 +305,12 @@ class TestPandasDocstringClass: "name", ["pandas.Series.str.isdecimal", "pandas.Series.str.islower"] ) def test_encode_content_write_to_file(self, name): + from numpydoc.docscrape import get_doc_object + from numpydoc.validate import Validator + # GH25466 - docstr = validate_docstrings.PandasDocstring(name).validate_pep8() + func_obj = get_doc_object(Validator._load_obj(name)) + docstr = validate_docstrings.PandasDocstring(func_obj).validate_pep8() # the list of pep8 errors should be empty assert not list(docstr) diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index 7562895d9db3e..1b596e8f88cdb 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -28,8 +28,9 @@ import matplotlib import matplotlib.pyplot as plt import numpy +from numpydoc.docscrape import get_doc_object from numpydoc.validate import ( - Docstring, + Validator, validate, ) @@ -38,7 
+39,6 @@ # With template backend, matplotlib plots nothing matplotlib.use("template") - PRIVATE_CLASSES = ["NDFrame", "IndexOpsMixin"] ERROR_MSGS = { "GL04": "Private classes ({mentioned_private_classes}) should not be " @@ -133,7 +133,7 @@ def get_api_items(api_doc_fd): previous_line = line -class PandasDocstring(Docstring): +class PandasDocstring(Validator): @property def mentioned_private_classes(self): return [klass for klass in PRIVATE_CLASSES if klass in self.raw_doc] @@ -145,10 +145,17 @@ def examples_errors(self): runner = doctest.DocTestRunner(optionflags=flags) context = {"np": numpy, "pd": pandas} error_msgs = "" - for test in finder.find(self.raw_doc, self.name, globs=context): - f = io.StringIO() - runner.run(test, out=f.write) - error_msgs += f.getvalue() + name = None + try: + name = self.name + except AttributeError: + if not isinstance(self.obj, property): + name = type(self.obj).__name__ + if name is not None: + for test in finder.find(self.raw_doc, name, globs=context): + f = io.StringIO() + runner.run(test, out=f.write) + error_msgs += f.getvalue() return error_msgs @property @@ -204,7 +211,8 @@ def pandas_validate(func_name: str): dict Information about the docstring and the errors found. """ - doc = PandasDocstring(func_name) + func_obj = get_doc_object(Validator._load_obj(func_name)) + doc = PandasDocstring(func_obj) result = validate(func_name) mentioned_errs = doc.mentioned_private_classes
- [x] closes #39688
https://api.github.com/repos/pandas-dev/pandas/pulls/41456
2021-05-13T19:16:23Z
2021-09-08T02:11:24Z
null
2022-02-04T19:21:50Z
[ArrowStringArray] PERF: Series.str.get_dummies
diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py index 79ea2a4fba284..0f68d1043b49d 100644 --- a/asv_bench/benchmarks/strings.py +++ b/asv_bench/benchmarks/strings.py @@ -249,10 +249,18 @@ def time_rsplit(self, dtype, expand): class Dummies: - def setup(self): - self.s = Series(tm.makeStringIndex(10 ** 5)).str.join("|") + params = ["str", "string", "arrow_string"] + param_names = ["dtype"] + + def setup(self, dtype): + from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401 + + try: + self.s = Series(tm.makeStringIndex(10 ** 5), dtype=dtype).str.join("|") + except ImportError: + raise NotImplementedError - def time_get_dummies(self): + def time_get_dummies(self, dtype): self.s.str.get_dummies("|") diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index 2646ddfa45b58..3b4549b55d1aa 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -24,6 +24,7 @@ is_categorical_dtype, is_integer, is_list_like, + is_object_dtype, is_re, ) from pandas.core.dtypes.generic import ( @@ -265,7 +266,11 @@ def _wrap_result( # infer from ndim if expand is not specified expand = result.ndim != 1 - elif expand is True and not isinstance(self._orig, ABCIndex): + elif ( + expand is True + and is_object_dtype(result) + and not isinstance(self._orig, ABCIndex) + ): # required when expand=True is explicitly specified # not needed when inferred diff --git a/pandas/tests/strings/test_strings.py b/pandas/tests/strings/test_strings.py index 5d8a63fe481f8..86c90398d0259 100644 --- a/pandas/tests/strings/test_strings.py +++ b/pandas/tests/strings/test_strings.py @@ -301,17 +301,19 @@ def test_isnumeric(any_string_dtype): tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e, dtype=dtype)) -def test_get_dummies(): - s = Series(["a|b", "a|c", np.nan]) +def test_get_dummies(any_string_dtype): + s = Series(["a|b", "a|c", np.nan], dtype=any_string_dtype) result = s.str.get_dummies("|") expected 
= DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]], columns=list("abc")) tm.assert_frame_equal(result, expected) - s = Series(["a;b", "a", 7]) + s = Series(["a;b", "a", 7], dtype=any_string_dtype) result = s.str.get_dummies(";") expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]], columns=list("7ab")) tm.assert_frame_equal(result, expected) + +def test_get_dummies_index(): # GH9980, GH8028 idx = Index(["a|b", "a|c", "b|c"]) result = idx.str.get_dummies("|") @@ -322,14 +324,18 @@ def test_get_dummies(): tm.assert_index_equal(result, expected) -def test_get_dummies_with_name_dummy(): +def test_get_dummies_with_name_dummy(any_string_dtype): # GH 12180 # Dummies named 'name' should work as expected - s = Series(["a", "b,name", "b"]) + s = Series(["a", "b,name", "b"], dtype=any_string_dtype) result = s.str.get_dummies(",") expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]], columns=["a", "b", "name"]) tm.assert_frame_equal(result, expected) + +def test_get_dummies_with_name_dummy_index(): + # GH 12180 + # Dummies named 'name' should work as expected idx = Index(["a|b", "name|c", "b|name"]) result = idx.str.get_dummies("|")
planning to remove the padding code from _wrap_result eventually, but until then we can skip it when we return a integer array from get_dummies adding tests and benchmarks as precursor to potential changes to _wrap_result #41372 have a working implementation for ArrowStringArray using pyarrow native functions but is slower than object fallback, so am leaving that for a followup. ``` before after ratio [4ec6925c] [091b0b02] <master> <get_dummies> - 2.58±0.02s 655±10ms 0.25 strings.Dummies.time_get_dummies('arrow_string') - 2.58±0.03s 643±9ms 0.25 strings.Dummies.time_get_dummies('string') - 2.59±0.07s 638±7ms 0.25 strings.Dummies.time_get_dummies('str') SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY. PERFORMANCE INCREASED. ```
https://api.github.com/repos/pandas-dev/pandas/pulls/41455
2021-05-13T18:28:36Z
2021-05-13T23:25:14Z
2021-05-13T23:25:14Z
2021-05-14T09:17:56Z
DOC: Complete first sentence in DataFrame.hist (#41421).
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 55097054fec88..27f8835968b54 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -132,7 +132,7 @@ def hist_frame( **kwargs, ): """ - Make a histogram of the DataFrame's. + Make a histogram of the DataFrame's columns. A `histogram`_ is a representation of the distribution of data. This function calls :meth:`matplotlib.pyplot.hist`, on each series in @@ -144,7 +144,7 @@ def hist_frame( ---------- data : DataFrame The pandas object holding the data. - column : str or sequence + column : str or sequence, optional If passed, will be used to limit data to a subset of columns. by : object, optional If passed, then used to form histograms for separate groups. @@ -171,7 +171,7 @@ def hist_frame( sharey : bool, default False In case subplots=True, share y axis and set some y axis labels to invisible. - figsize : tuple + figsize : tuple, optional The size in inches of the figure to create. Uses the value in `matplotlib.rcParams` by default. layout : tuple, optional
Complete sentence and mark column and figsize as optional parameters, following suggested fix. - [ ] closes #41421 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41454
2021-05-13T17:34:26Z
2021-05-14T16:09:24Z
2021-05-14T16:09:23Z
2021-05-14T16:09:27Z
CI: Unpin nbformat after bugfix releases
diff --git a/environment.yml b/environment.yml index 30fa7c0dea696..1347ed696f1c2 100644 --- a/environment.yml +++ b/environment.yml @@ -70,7 +70,7 @@ dependencies: # unused (required indirectly may be?) - ipywidgets - - nbformat=5.0.8 + - nbformat - notebook>=5.7.5 - pip diff --git a/requirements-dev.txt b/requirements-dev.txt index 3e421c7715566..a53bedb87241d 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -44,7 +44,7 @@ pytest-instafail seaborn statsmodels ipywidgets -nbformat==5.0.8 +nbformat notebook>=5.7.5 pip blosc
- [x] closes #39176 Pinned this a few months ago, checking if new releases fixed the bugs
https://api.github.com/repos/pandas-dev/pandas/pulls/41453
2021-05-13T13:52:58Z
2021-05-13T17:48:57Z
2021-05-13T17:48:56Z
2021-05-13T17:50:16Z
CI: Pin jinja2 to version lower than 3.0
diff --git a/environment.yml b/environment.yml index 30fa7c0dea696..99ce0d9f9ea01 100644 --- a/environment.yml +++ b/environment.yml @@ -79,7 +79,7 @@ dependencies: - bottleneck>=1.2.1 - ipykernel - ipython>=7.11.1 - - jinja2 # pandas.Styler + - jinja2<3.0.0 # pandas.Styler - matplotlib>=2.2.2 # pandas.plotting, Series.plot, DataFrame.plot - numexpr>=2.6.8 - scipy>=1.2 diff --git a/requirements-dev.txt b/requirements-dev.txt index 3e421c7715566..6ba867f470f8f 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -51,7 +51,7 @@ blosc bottleneck>=1.2.1 ipykernel ipython>=7.11.1 -jinja2 +jinja2<3.0.0 matplotlib>=2.2.2 numexpr>=2.6.8 scipy>=1.2
- [x] xref #41450
https://api.github.com/repos/pandas-dev/pandas/pulls/41452
2021-05-13T12:39:19Z
2021-05-13T13:43:03Z
2021-05-13T13:43:03Z
2021-05-24T10:12:29Z
Parse IntervalArray and IntervalIndex from strings
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 9a1435c3f033d..66b3908732ff0 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -5,6 +5,7 @@ le, lt, ) +import re import textwrap from typing import ( Sequence, @@ -574,6 +575,106 @@ def from_tuples( return cls.from_arrays(left, right, closed, copy=False, dtype=dtype) + _interval_shared_docs["from_strings"] = textwrap.dedent( + """ + Construct from string representations of the left and right bounds. + + Parameters + ---------- + data : array-like (1-dimensional) + Strings representing the Interval's to parse. + closed : {'left', 'right', 'both', 'neither'}, default 'right' + Whether the intervals are closed on the left-side, right-side, both + or neither. + dtype : dtype, optional + If None, dtype will be inferred. + + Returns + ------- + %(klass)s + + Raises + ------ + ValueError + When a string cannot be parsed as an Interval + When the dtype of the string cannot be parsed as either float, + Timestamp or Timedelta + + See Also + -------- + interval_range : Function to create a fixed frequency IntervalIndex. + %(klass)s.from_breaks : Construct an %(klass)s from an array of + splits. + %(klass)s.from_tuples : Construct an %(klass)s from an + array-like of tuples. + + %(examples)s\ + """ + ) + + @classmethod + @Appender( + _interval_shared_docs["from_strings"] + % { + "klass": "IntervalIndex", + "examples": textwrap.dedent( + """\ + Examples + -------- + >>> pd.IntervalIndex.from_strings(["(0, 1]", "(1, 2]"]) + IntervalIndex([(0, 1], (1, 2]], + dtype='interval[int64, right]') + """ + ), + } + ) + def _from_sequence_of_strings( + cls: type[IntervalArrayT], + data: Sequence[str], + closed: str = "right", + dtype: Dtype | None = None, + ) -> IntervalArrayT: + # The different closing brackets define which pattern to look for. 
+ brackets = { + "right": ("(", "]"), + "left": ("[", ")"), + "both": ("[", "]"), + "neither": ("(", ")"), + } + pattern = re.compile( + "\\" + brackets[closed][0] + ".*,.*\\" + brackets[closed][1] + ) + + left, right = [], [] + for string in data: + + # Try to match "(left, right]" where 'left' and 'right' are breaks. + breaks_match = pattern.match(string) + + if breaks_match is None: + raise ValueError( + f"Could not find opening '{brackets[closed][0]}' " + f"and closing '{brackets[closed][1]}' " + f"brackets in string: '{string}'" + ) + # Try to split 'left' and 'right' based on a comma and a space. + breaks = breaks_match.string[1:-1].split(", ", 1) + + if len(breaks) != 2: + raise ValueError( + f"Delimiter ', ' (comma + space) not found in string: {string}" + ) + + newleft, newright = _parse_breaks(breaks) + left.append(newleft) + right.append(newright) + + # If dtype was not an IntervalDtype, try to parse it as such. + if dtype is not None and not isinstance(dtype, IntervalDtype): + dtype = IntervalDtype(subtype=dtype, closed=closed) + + return cls.from_arrays(left, right, closed=closed, copy=False, dtype=dtype) + def _validate(self): """ Verify that the IntervalArray is valid. @@ -1725,3 +1826,40 @@ def _maybe_convert_platform_interval(values) -> ArrayLike: if not hasattr(values, "dtype"): return np.asarray(values) return values + + +def _parse_breaks(breaks: list[str]) -> ArrayLike: + """ + Parse string representations of interval breaks. + + The succession to try is: + 1. Numeric (float, int, etc) + 2. Timestamp + 3. Timedelta + + If none work, a ValueError is raised. + + Parameters + ---------- + breaks : A list of strings to parse. 
+ + Returns + ------- + The parsed breaks + """ + from pandas import ( + to_datetime, + to_numeric, + to_timedelta, + ) + + for parser in [to_numeric, to_datetime, to_timedelta]: + try: + return parser(breaks, errors="raise") + except ValueError: + continue + else: + raise ValueError( + "Could not parse string as numeric, Timedelta " + f"or Timestamp Interval: {', '.join(breaks)}" + ) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index a378fd95b9c03..d958c715ae87a 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -9,6 +9,7 @@ from typing import ( Any, Hashable, + Sequence, ) import numpy as np @@ -316,6 +317,36 @@ def from_tuples( arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype) return cls._simple_new(arr, name=name) + @classmethod + @Appender( + _interval_shared_docs["from_strings"] + % { + "klass": "IntervalIndex", + "examples": textwrap.dedent( + """\ + Examples + -------- + >>> pd.IntervalIndex.from_strings(["(0, 1]", "(1, 2]"]) + IntervalIndex([(0, 1], (1, 2]], + dtype='interval[int64, right]') + """ + ), + } + ) + def from_strings( + cls, + data: Sequence[str], + closed: str = "right", + dtype: Dtype | None = None, + name: Hashable = None, + ) -> IntervalIndex: + with rewrite_exception("IntervalArray", cls.__name__): + arr = IntervalArray._from_sequence_of_strings( + data=data, dtype=dtype, closed=closed + ) + + return cls._simple_new(arr, name=name) + # -------------------------------------------------------------------- @cache_readonly diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 843885832690f..a307bd5ab8da3 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -890,6 +890,71 @@ def test_is_all_dates(self): assert not year_2017_index._is_all_dates +@pytest.mark.parametrize("closed", ["left", "right", "both", "neither"]) 
+@pytest.mark.parametrize( + "test_case", + [ + ( + "float64", + [0.0, 0.5, 1.0], + ["0.0, 0.5", "0.5, 1.0"], + ), + ("int64", [0, 5, 10], ["0, 5", "5, 10"]), + ( + "datetime64[ns]", + [Timestamp(2015, 7, 1), Timestamp(2016, 8, 1), Timestamp(2018, 9, 1)], + ["2015-07-01, 2016-08-01", "2016-08-01, 2018-09-01"], + ), + ], +) +def test_from_strings(closed, test_case): + """Test the IntervalIndex.from_strings class method.""" + # See https://github.com/pandas-dev/pandas/pull/41451 + dtype, expected, strings = test_case + + brackets = { + "right": ("(", "]"), + "left": ("[", ")"), + "both": ("[", "]"), + "neither": ("(", ")"), + } + # Assign the brackets associated to the closed type to be tested + interval_strings = [brackets[closed][0] + s + brackets[closed][1] for s in strings] + + # Attempt to infer the type dynamically + tm.assert_index_equal( + IntervalIndex.from_strings(interval_strings, closed=closed), + IntervalIndex.from_breaks(expected, closed=closed), + exact=True, + ) + + # Parse it with a fixed dtype and assert that the result is correct. + tm.assert_index_equal( + IntervalIndex.from_strings( + interval_strings, dtype=np.dtype(dtype), closed=closed + ), + IntervalIndex.from_breaks(expected, closed=closed), + exact=True, + ) + + +@pytest.mark.parametrize( + "wrong_indices", + [ + ("('hello', 'there']", r"Could not parse string as numeric"), + ("(0.1,0.1)", r"Could not find opening '\(' and closing ']'"), + ("(0.0,0.5]", r"Delimiter ', ' .* not found"), + ], +) +def test_from_strings_errors(wrong_indices): + """Validate the error messages from the IntervalIndex.from_strings method.""" + # See https://github.com/pandas-dev/pandas/pull/41451 + string, error = wrong_indices + + with pytest.raises(ValueError, match=error): + IntervalIndex.from_strings([string]) + + def test_dir(): # GH#27571 dir(interval_index) should not raise index = IntervalIndex.from_arrays([0, 1], [1, 2])
Currently, when saving a DataFrame with an IntervalIndex as a CSV, there is no easy way to parse it again. With this PR, class methods are introduced to handle this: ```python import tempfile import numpy as np import pandas as pd # Create a DataFrame with an IntervalIndex df = pd.DataFrame(data=[1, 2, 3], index=pd.IntervalIndex.from_breaks([2., 3., 4., 5.])) # Create a temporary directory to save the csv in temp_dir = tempfile.TemporaryDirectory() df.to_csv(temp_dir.name + 'df.csv') print(df) # Read the DataFrame containing the IntervalIndex column df2 = pd.read_csv(temp_dir.name + 'df.csv') # Convert the column to an IntervalIndex df2.index = pd.IntervalIndex.from_strings(df2.iloc[:, 0]) df2.drop(columns=df2.columns[0], inplace=True) print(df2) # Validate that the original and parsed indices are the same assert np.array_equal(df.index, df2.index) ``` ``` 0 (2.0, 3.0] 1 (3.0, 4.0] 2 (4.0, 5.0] 3 0 (2.0, 3.0] 1 (3.0, 4.0] 2 (4.0, 5.0] 3 ``` As can be seen in the tests, the conversion supports each valid dtype of an Interval and raises descriptive exceptions if it fails. - [X] closes #23595 - [X] tests added / passed - [X] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41451
2021-05-13T10:10:01Z
2022-03-06T01:11:19Z
null
2022-03-06T01:11:19Z
Deprecate inplace=True for reset_index
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 6ceae4dfd8a91..505eab5c11a55 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -646,6 +646,7 @@ Deprecations - Deprecated :func:`merge` producing duplicated columns through the ``suffixes`` keyword and already existing columns (:issue:`22818`) - Deprecated setting :attr:`Categorical._codes`, create a new :class:`Categorical` with the desired codes instead (:issue:`40606`) - Deprecated using ``usecols`` with out of bounds indices for ``read_csv`` with ``engine="c"`` (:issue:`25623`) +- Deprecated ``inplace`` keyword in :meth:`DataFrame.reset_index` (:issue:`16529`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 2941b6ac01904..db03a137c2e43 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -44,7 +44,10 @@ properties, ) from pandas._libs.hashtable import duplicated -from pandas._libs.lib import no_default +from pandas._libs.lib import ( + NoDefault, + no_default, +) from pandas._typing import ( AggFuncType, AnyArrayLike, @@ -5571,7 +5574,7 @@ def reset_index( self, level: Hashable | Sequence[Hashable] | None = None, drop: bool = False, - inplace: bool = False, + inplace: bool | NoDefault = no_default, col_level: Hashable = 0, col_fill: Hashable = "", ) -> DataFrame | None: @@ -5716,6 +5719,16 @@ class max type lion mammal 80.5 run monkey mammal NaN jump """ + if inplace is not no_default: + warnings.warn( + "'inplace' will be removed in a future version " + "and the current default behaviour ('inplace=False') will " + "be used. 
Remove the 'inplace' argument to silence this warning.", + FutureWarning, + stacklevel=2, + ) + else: + inplace = False inplace = validate_bool_kwarg(inplace, "inplace") self._check_inplace_and_allows_duplicate_labels(inplace) if inplace: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a09cc0a6324c0..aa58fafe13cee 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1847,11 +1847,11 @@ def _drop_labels_or_levels(self, keys, axis: int = 0): if axis == 0: # Handle dropping index levels if levels_to_drop: - dropped.reset_index(levels_to_drop, drop=True, inplace=True) + dropped = dropped.reset_index(levels_to_drop, drop=True) # Handle dropping columns labels if labels_to_drop: - dropped.drop(labels_to_drop, axis=1, inplace=True) + dropped = dropped.drop(labels_to_drop, axis=1) else: # Handle dropping column levels if levels_to_drop: diff --git a/pandas/io/sql.py b/pandas/io/sql.py index a347e7a99be8b..c27302c60cc9f 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -900,7 +900,7 @@ def insert_data(self): temp = self.frame.copy() temp.index.names = self.index try: - temp.reset_index(inplace=True) + temp = temp.reset_index() except ValueError as err: raise ValueError(f"duplicate name in index/columns: {err}") from err else: diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py index 5a87803ddc21e..87c8ff95e00aa 100644 --- a/pandas/tests/frame/methods/test_reset_index.py +++ b/pandas/tests/frame/methods/test_reset_index.py @@ -160,7 +160,13 @@ def test_reset_index(self, float_frame): # test resetting in place df = float_frame.copy() reset = float_frame.reset_index() - return_value = df.reset_index(inplace=True) + msg = ( + r"'inplace' will be removed in a future version " + r"and the current default behaviour \('inplace=False'\) will " + r"be used\. Remove the 'inplace' argument to silence this warning\." 
+ ) + with tm.assert_produces_warning(FutureWarning, match=msg): + return_value = df.reset_index(inplace=True) assert return_value is None tm.assert_frame_equal(df, reset, check_names=False) @@ -179,7 +185,13 @@ def test_reset_index_name(self): ) assert df.reset_index().index.name is None assert df.reset_index(drop=True).index.name is None - return_value = df.reset_index(inplace=True) + msg = ( + r"'inplace' will be removed in a future version " + r"and the current default behaviour \('inplace=False'\) will " + r"be used\. Remove the 'inplace' argument to silence this warning\." + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + return_value = df.reset_index(inplace=True) assert return_value is None assert df.index.name is None @@ -671,3 +683,16 @@ def test_reset_index_multiindex_nat(): index=pd.DatetimeIndex(["2015-07-01", "2015-07-02", "NaT"], name="tstamp"), ) tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("inplace", [True, False]) +def test_inplace_deprecation_warning(inplace): + # GH16529 + df = DataFrame({"a": [1, 2, 3]}) + msg = ( + r"'inplace' will be removed in a future version " + r"and the current default behaviour \('inplace=False'\) will " + r"be used\. Remove the 'inplace' argument to silence this warning\." + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + df.reset_index(inplace=inplace) diff --git a/pandas/tests/frame/methods/test_to_csv.py b/pandas/tests/frame/methods/test_to_csv.py index 3b2668aea001c..ec591be285d79 100644 --- a/pandas/tests/frame/methods/test_to_csv.py +++ b/pandas/tests/frame/methods/test_to_csv.py @@ -535,7 +535,13 @@ def test_to_csv_headers(self): from_df.to_csv(path, index=False, header=["X", "Y"]) recons = self.read_csv(path) - return_value = recons.reset_index(inplace=True) + msg = ( + r"'inplace' will be removed in a future version " + r"and the current default behaviour \('inplace=False'\) will " + r"be used\. Remove the 'inplace' argument to silence this warning\." 
+ ) + with tm.assert_produces_warning(FutureWarning, match=msg): + return_value = recons.reset_index(inplace=True) assert return_value is None tm.assert_frame_equal(to_df, recons) diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 76cfd77d254f2..77ee331a8d3ab 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -223,7 +223,13 @@ def _check_f(base, f): # reset_index f = lambda x: x.reset_index(inplace=True) - _check_f(data.set_index("a"), f) + msg = ( + r"'inplace' will be removed in a future version " + r"and the current default behaviour \('inplace=False'\) will " + r"be used\. Remove the 'inplace' argument to silence this warning\." + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + _check_f(data.set_index("a"), f) # drop_duplicates f = lambda x: x.drop_duplicates(inplace=True) diff --git a/pandas/tests/frame/test_validate.py b/pandas/tests/frame/test_validate.py index e99e0a6863848..14bb0c85fde18 100644 --- a/pandas/tests/frame/test_validate.py +++ b/pandas/tests/frame/test_validate.py @@ -1,5 +1,6 @@ import pytest +import pandas._testing as tm from pandas.core.frame import DataFrame @@ -17,7 +18,6 @@ class TestDataFrameValidate: "query", "eval", "set_index", - "reset_index", "dropna", "drop_duplicates", "sort_values", @@ -39,3 +39,26 @@ def test_validate_bool_args(self, dataframe, func, inplace): with pytest.raises(ValueError, match=msg): getattr(dataframe, func)(**kwargs) + + @pytest.mark.parametrize( + "func", + [ + "reset_index", + ], + ) + @pytest.mark.parametrize("inplace", [1, "True", "False", [1, 2, 3], 5.0]) + def test_validate_bool_args_with_deprecation_warning( + self, dataframe, func, inplace + ): + # GH16529 + msg = 'For argument "inplace" expected type bool' + kwargs = {"inplace": inplace} + + warning_msg = ( + r"'inplace' will be removed in a future version " + r"and the current default behaviour \('inplace=False'\) will " + r"be used\. 
Remove the 'inplace' argument to silence this warning\." + ) + warning_ctx = tm.assert_produces_warning(FutureWarning, match=warning_msg) + with pytest.raises(ValueError, match=msg), warning_ctx: + getattr(dataframe, func)(**kwargs) diff --git a/pandas/tests/generic/test_duplicate_labels.py b/pandas/tests/generic/test_duplicate_labels.py index 1b32675ec2d35..f9da951a6ca5c 100644 --- a/pandas/tests/generic/test_duplicate_labels.py +++ b/pandas/tests/generic/test_duplicate_labels.py @@ -427,7 +427,6 @@ def test_dataframe_insert_raises(): [ (operator.methodcaller("set_index", "A", inplace=True), True), (operator.methodcaller("set_axis", ["A", "B"], inplace=True), False), - (operator.methodcaller("reset_index", inplace=True), True), (operator.methodcaller("rename", lambda x: x, inplace=True), False), ], ) @@ -446,6 +445,34 @@ def test_inplace_raises(method, frame_only): method(s) +@pytest.mark.parametrize( + "method, frame_only", + [ + (operator.methodcaller("reset_index", inplace=True), True), + ], +) +def test_inplace_raises_with_deprecation_warning(method, frame_only): + # GH16529 + df = pd.DataFrame({"A": [0, 0], "B": [1, 2]}).set_flags( + allows_duplicate_labels=False + ) + s = df["A"] + s.flags.allows_duplicate_labels = False + error_msg = "Cannot specify" + + warning_msg = ( + r"'inplace' will be removed in a future version " + r"and the current default behaviour \('inplace=False'\) will " + r"be used\. Remove the 'inplace' argument to silence this warning\." 
+ ) + warning_ctx = tm.assert_produces_warning(FutureWarning, match=warning_msg) + with pytest.raises(ValueError, match=error_msg), warning_ctx: + method(df) + if not frame_only: + with pytest.raises(ValueError, match=error_msg), warning_ctx: + method(s) + + def test_pickle(): a = pd.Series([1, 2]).set_flags(allows_duplicate_labels=False) b = tm.round_trip_pickle(a) diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py index 3b6bfee8f9657..88a33f253e304 100644 --- a/pandas/tests/io/sas/test_sas7bdat.py +++ b/pandas/tests/io/sas/test_sas7bdat.py @@ -303,7 +303,13 @@ def test_max_sas_date_iterator(datapath): df = df.applymap(round_datetime_to_ms) except AttributeError: df["dt_as_dt"] = df["dt_as_dt"].apply(round_datetime_to_ms) - df.reset_index(inplace=True, drop=True) + msg = ( + r"'inplace' will be removed in a future version " + r"and the current default behaviour \('inplace=False'\) will " + r"be used\. Remove the 'inplace' argument to silence this warning\." + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + df.reset_index(inplace=True, drop=True) results.append(df) expected = [ pd.DataFrame(
- [ ] xref #16529 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry Per the discussion yesterday, I just wanted to clarify that putting in a deprecation warning (such as thing one) in v1.3.0 gives enough time to remove `inplace` in v2.0.0 if, as is likely, there is not another minor version between v1.3.0 and v2.0.0. Will it be OK to change it to a `FutureWarning` in a patch release of v1.3? If this is OK, then I'm running a little sprint on Saturday for PyLadiesLondon, and putting in more of these warning will make for a good sprint issue
https://api.github.com/repos/pandas-dev/pandas/pulls/41449
2021-05-13T08:11:54Z
2021-05-15T07:19:20Z
null
2021-05-15T07:19:20Z
WEB: Fix maintainers grid not displaying correctly (GH41438)
diff --git a/web/pandas/about/team.md b/web/pandas/about/team.md index 39f63202e1986..c8318dd8758ed 100644 --- a/web/pandas/about/team.md +++ b/web/pandas/about/team.md @@ -8,30 +8,22 @@ If you want to support pandas development, you can find information in the [dona ## Maintainers -<div class="row maintainers"> - {% for row in maintainers.people | batch(6, "") %} - <div class="card-group maintainers"> - {% for person in row %} - {% if person %} - <div class="card"> - <img class="card-img-top" alt="" src="{{ person.avatar_url }}"/> - <div class="card-body"> - <h6 class="card-title"> - {% if person.blog %} - <a href="{{ person.blog }}"> - {{ person.name or person.login }} - </a> - {% else %} - {{ person.name or person.login }} - {% endif %} - </h6> - <p class="card-text small"><a href="{{ person.html_url }}">{{ person.login }}</a></p> - </div> - </div> - {% else %} - <div class="card border-0"></div> - {% endif %} - {% endfor %} +<div class="card-group maintainers"> + {% for person in maintainers.people %} + <div class="card"> + <img class="card-img-top" alt="" src="{{ person.avatar_url }}"/> + <div class="card-body"> + <h6 class="card-title"> + {% if person.blog %} + <a href="{{ person.blog }}"> + {{ person.name or person.login }} + </a> + {% else %} + {{ person.name or person.login }} + {% endif %} + </h6> + <p class="card-text small"><a href="{{ person.html_url }}">{{ person.login }}</a></p> + </div> </div> {% endfor %} </div> diff --git a/web/pandas/static/css/pandas.css b/web/pandas/static/css/pandas.css index d76d1a0befeba..459f006db5727 100644 --- a/web/pandas/static/css/pandas.css +++ b/web/pandas/static/css/pandas.css @@ -45,6 +45,12 @@ a.navbar-brand img { div.card { margin: 0 0 .2em .2em !important; } +@media (min-width: 576px) { + .card-group.maintainers div.card { + min-width: 10rem; + max-width: 10rem; + } +} div.card .card-title { font-weight: 500; color: #130654;
- [x] closes #41438 - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them Screenshot taken on Google Chrome 90.0.4430.93 ![screenshot](https://user-images.githubusercontent.com/18680207/118068760-ef740800-b370-11eb-902d-237da39e2b7f.png) To reproduce, - go to https://pandas.pydata.org/about/team.html - open developer tools - go to elements tab - navigate to <div class="row maintainers"> - edit as HTML - delete <div class="row maintainers"> and the closing tag at the end
https://api.github.com/repos/pandas-dev/pandas/pulls/41447
2021-05-13T02:33:03Z
2021-05-14T20:06:42Z
2021-05-14T20:06:42Z
2021-05-14T21:24:37Z
BUG: Raise ValueError if names and prefix are both defined
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 84f9dae8a0850..6fbaa85c7adcd 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -840,6 +840,7 @@ I/O - Bug in :func:`read_excel` raising ``AttributeError`` with ``MultiIndex`` header followed by two empty rows and no index, and bug affecting :func:`read_excel`, :func:`read_csv`, :func:`read_table`, :func:`read_fwf`, and :func:`read_clipboard` where one blank row after a ``MultiIndex`` header with no index would be dropped (:issue:`40442`) - Bug in :meth:`DataFrame.to_string` misplacing the truncation column when ``index=False`` (:issue:`40907`) - Bug in :func:`read_orc` always raising ``AttributeError`` (:issue:`40918`) +- Bug in :func:`read_csv` and :func:`read_table` silently ignoring ``prefix`` if ``names`` and ``prefix`` are defined, now raising ``ValueError`` (:issue:`39123`) - Bug in :func:`read_csv` and :func:`read_excel` not respecting dtype for duplicated column name when ``mangle_dupe_cols`` is set to ``True`` (:issue:`35211`) - Bug in :func:`read_csv` and :func:`read_table` misinterpreting arguments when ``sys.setprofile`` had been previously called (:issue:`41069`) - Bug in the conversion from pyarrow to pandas (e.g. 
for reading Parquet) with nullable dtypes and a pyarrow array whose data buffer size is not a multiple of dtype size (:issue:`40896`) diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index 55e3e14a0969d..9f7539f575308 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -20,6 +20,7 @@ import pandas._libs.lib as lib from pandas._libs.parsers import STR_NA_VALUES from pandas._typing import ( + ArrayLike, DtypeArg, FilePathOrBuffer, StorageOptions, @@ -485,11 +486,11 @@ def read_csv( delimiter=None, # Column and Index Locations and Names header="infer", - names=None, + names=lib.no_default, index_col=None, usecols=None, squeeze=False, - prefix=None, + prefix=lib.no_default, mangle_dupe_cols=True, # General Parsing Configuration dtype: Optional[DtypeArg] = None, @@ -546,7 +547,14 @@ def read_csv( del kwds["sep"] kwds_defaults = _refine_defaults_read( - dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": ","} + dialect, + delimiter, + delim_whitespace, + engine, + sep, + names, + prefix, + defaults={"delimiter": ","}, ) kwds.update(kwds_defaults) @@ -567,11 +575,11 @@ def read_table( delimiter=None, # Column and Index Locations and Names header="infer", - names=None, + names=lib.no_default, index_col=None, usecols=None, squeeze=False, - prefix=None, + prefix=lib.no_default, mangle_dupe_cols=True, # General Parsing Configuration dtype: Optional[DtypeArg] = None, @@ -627,7 +635,14 @@ def read_table( del kwds["sep"] kwds_defaults = _refine_defaults_read( - dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": "\t"} + dialect, + delimiter, + delim_whitespace, + engine, + sep, + names, + prefix, + defaults={"delimiter": "\t"}, ) kwds.update(kwds_defaults) @@ -1174,6 +1189,8 @@ def _refine_defaults_read( delim_whitespace: bool, engine: str, sep: Union[str, object], + names: Union[Optional[ArrayLike], object], + prefix: Union[Optional[str], object], defaults: Dict[str, Any], ): 
"""Validate/refine default values of input parameters of read_csv, read_table. @@ -1199,6 +1216,12 @@ def _refine_defaults_read( sep : str or object A delimiter provided by the user (str) or a sentinel value, i.e. pandas._libs.lib.no_default. + names : array-like, optional + List of column names to use. If the file contains a header row, + then you should explicitly pass ``header=0`` to override the column names. + Duplicates in this list are not allowed. + prefix : str, optional + Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ... defaults: dict Default values of input parameters. @@ -1232,6 +1255,12 @@ def _refine_defaults_read( sep is lib.no_default or sep == delim_default ) + if names is not lib.no_default and prefix is not lib.no_default: + raise ValueError("Specified named and prefix; you can only specify one.") + + kwds["names"] = None if names is lib.no_default else names + kwds["prefix"] = None if prefix is lib.no_default else prefix + # Alias sep -> delimiter. if delimiter is None: delimiter = sep diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py index fe68597d11f0b..ed395df53432e 100644 --- a/pandas/tests/io/parser/common/test_common_basic.py +++ b/pandas/tests/io/parser/common/test_common_basic.py @@ -740,6 +740,18 @@ def test_read_table_delim_whitespace_non_default_sep(all_parsers, delimiter): parser.read_table(f, delim_whitespace=True, delimiter=delimiter) +@pytest.mark.parametrize("func", ["read_csv", "read_table"]) +@pytest.mark.parametrize("prefix", [None, "x"]) +@pytest.mark.parametrize("names", [None, ["a"]]) +def test_names_and_prefix_not_lib_no_default(all_parsers, names, prefix, func): + # GH#39123 + f = StringIO("a,b\n1,2") + parser = all_parsers + msg = "Specified named and prefix; you can only specify one." 
+ with pytest.raises(ValueError, match=msg): + getattr(parser, func)(f, names=names, prefix=prefix) + + def test_dict_keys_as_names(all_parsers): # GH: 36928 data = "1,2"
- [x] closes #39123 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41446
2021-05-12T22:52:11Z
2021-05-14T16:05:15Z
2021-05-14T16:05:15Z
2023-04-27T19:52:24Z
BUG: resample.apply with non-unique columns
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 84f9dae8a0850..793818419c910 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -901,6 +901,7 @@ Groupby/resample/rolling - Bug in :meth:`DataFrameGroupBy.rank` with the GroupBy object's ``axis=0`` and the ``rank`` method's keyword ``axis=1`` (:issue:`41320`) - Bug in :meth:`DataFrameGroupBy.__getitem__` with non-unique columns incorrectly returning a malformed :class:`SeriesGroupBy` instead of :class:`DataFrameGroupBy` (:issue:`41427`) - Bug in :meth:`DataFrameGroupBy.transform` with non-unique columns incorrectly raising ``AttributeError`` (:issue:`41427`) +- Bug in :meth:`Resampler.apply` with non-unique columns incorrectly dropping duplicated columns (:issue:`41445`) Reshaping ^^^^^^^^^ diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index cb5b54ca0c598..71d19cdd877a6 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1106,6 +1106,7 @@ def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame: result: dict[Hashable, NDFrame | np.ndarray] = {} if axis != obj._info_axis_number: + # test_pass_args_kwargs_duplicate_columns gets here with non-unique columns for name, data in self: fres = func(data, *args, **kwargs) result[name] = fres @@ -1119,18 +1120,23 @@ def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame: def _aggregate_item_by_item(self, func, *args, **kwargs) -> DataFrame: # only for axis==0 + # tests that get here with non-unique cols: + # test_resample_with_timedelta_yields_no_empty_groups, + # test_resample_apply_product obj = self._obj_with_exclusions result: dict[int | str, NDFrame] = {} - for item in obj: - data = obj[item] - colg = SeriesGroupBy(data, selection=item, grouper=self.grouper) - - result[item] = colg.aggregate(func, *args, **kwargs) + for i, item in enumerate(obj): + ser = obj.iloc[:, i] + colg = SeriesGroupBy( + ser, selection=item, 
grouper=self.grouper, exclusions=self.exclusions + ) - result_columns = obj.columns + result[i] = colg.aggregate(func, *args, **kwargs) - return self.obj._constructor(result, columns=result_columns) + res_df = self.obj._constructor(result) + res_df.columns = obj.columns + return res_df def _wrap_applied_output(self, data, keys, values, not_indexed_same=False): if len(keys) == 0: @@ -1401,6 +1407,7 @@ def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFram def _transform_item_by_item(self, obj: DataFrame, wrapper) -> DataFrame: # iterate through columns, see test_transform_exclude_nuisance + # gets here with non-unique columns output = {} inds = [] for i, col in enumerate(obj): diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 4368e57a7da4d..83aeb29ec53df 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -248,6 +248,26 @@ def f(x, q=None, axis=0): tm.assert_frame_equal(apply_result, expected, check_names=False) +@pytest.mark.parametrize("as_index", [True, False]) +def test_pass_args_kwargs_duplicate_columns(tsframe, as_index): + # go through _aggregate_frame with self.axis == 0 and duplicate columns + tsframe.columns = ["A", "B", "A", "C"] + gb = tsframe.groupby(lambda x: x.month, as_index=as_index) + + res = gb.agg(np.percentile, 80, axis=0) + + ex_data = { + 1: tsframe[tsframe.index.month == 1].quantile(0.8), + 2: tsframe[tsframe.index.month == 2].quantile(0.8), + } + expected = DataFrame(ex_data).T + if not as_index: + # TODO: try to get this more consistent? 
+ expected.index = Index(range(2)) + + tm.assert_frame_equal(res, expected) + + def test_len(): df = tm.makeTimeDataFrame() grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]) diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index 66cb2f2291e98..1c7aa5c444da9 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -1748,19 +1748,23 @@ def test_get_timestamp_range_edges(first, last, freq, exp_first, exp_last): assert result == expected -def test_resample_apply_product(): +@pytest.mark.parametrize("duplicates", [True, False]) +def test_resample_apply_product(duplicates): # GH 5586 index = date_range(start="2012-01-31", freq="M", periods=12) ts = Series(range(12), index=index) df = DataFrame({"A": ts, "B": ts + 2}) + if duplicates: + df.columns = ["A", "A"] + result = df.resample("Q").apply(np.product) expected = DataFrame( np.array([[0, 24], [60, 210], [336, 720], [990, 1716]], dtype=np.int64), index=DatetimeIndex( ["2012-03-31", "2012-06-30", "2012-09-30", "2012-12-31"], freq="Q-DEC" ), - columns=["A", "B"], + columns=df.columns, ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/resample/test_timedelta.py b/pandas/tests/resample/test_timedelta.py index b1560623cd871..e127f69b12674 100644 --- a/pandas/tests/resample/test_timedelta.py +++ b/pandas/tests/resample/test_timedelta.py @@ -153,18 +153,24 @@ def test_resample_timedelta_edge_case(start, end, freq, resample_freq): assert not np.isnan(result[-1]) -def test_resample_with_timedelta_yields_no_empty_groups(): +@pytest.mark.parametrize("duplicates", [True, False]) +def test_resample_with_timedelta_yields_no_empty_groups(duplicates): # GH 10603 df = DataFrame( np.random.normal(size=(10000, 4)), index=timedelta_range(start="0s", periods=10000, freq="3906250n"), ) + if duplicates: + # case with non-unique columns + df.columns = ["A", "B", "A", "C"] + result = 
df.loc["1s":, :].resample("3s").apply(lambda x: len(x)) expected = DataFrame( [[768] * 4] * 12 + [[528] * 4], index=timedelta_range(start="1s", periods=13, freq="3s"), ) + expected.columns = df.columns tm.assert_frame_equal(result, expected)
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41445
2021-05-12T22:44:38Z
2021-05-13T17:53:29Z
2021-05-13T17:53:29Z
2021-05-13T17:59:00Z
DOC: Improve examples of df.append to better show the ignore_index param
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 2941b6ac01904..d90487647d35b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8793,6 +8793,7 @@ def append( Returns ------- DataFrame + A new DataFrame consisting of the rows of caller and the rows of `other`. See Also -------- @@ -8811,18 +8812,18 @@ def append( Examples -------- - >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB')) + >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'), index=['x', 'y']) >>> df A B - 0 1 2 - 1 3 4 - >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB')) + x 1 2 + y 3 4 + >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'), index=['x', 'y']) >>> df.append(df2) A B - 0 1 2 - 1 3 4 - 0 5 6 - 1 7 8 + x 1 2 + y 3 4 + x 5 6 + y 7 8 With `ignore_index` set to True:
Change index to [0, 2] to stress that ignore_index=True resets the index of both dataframes - [x] closes #41407 - [x] tests passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry Test Output: ``` ################################################################################ ################################## Validation ################################## ################################################################################ 1 Errors found: Return value has no description ```
https://api.github.com/repos/pandas-dev/pandas/pulls/41444
2021-05-12T21:26:53Z
2021-05-17T13:18:28Z
2021-05-17T13:18:28Z
2021-05-17T13:18:33Z
Revert "Pin fastparquet to leq 0.5.0"
diff --git a/ci/deps/actions-37-db.yaml b/ci/deps/actions-37-db.yaml index edca7b51a3420..8755e1a02c3cf 100644 --- a/ci/deps/actions-37-db.yaml +++ b/ci/deps/actions-37-db.yaml @@ -15,7 +15,7 @@ dependencies: - beautifulsoup4 - botocore>=1.11 - dask - - fastparquet>=0.4.0, <=0.5.0 + - fastparquet>=0.4.0 - fsspec>=0.7.4 - gcsfs>=0.6.0 - geopandas diff --git a/ci/deps/azure-windows-38.yaml b/ci/deps/azure-windows-38.yaml index fdea34d573340..661d8813d32d2 100644 --- a/ci/deps/azure-windows-38.yaml +++ b/ci/deps/azure-windows-38.yaml @@ -15,7 +15,7 @@ dependencies: # pandas dependencies - blosc - bottleneck - - fastparquet>=0.4.0, <=0.5.0 + - fastparquet>=0.4.0 - flask - fsspec>=0.8.0 - matplotlib=3.1.3 diff --git a/environment.yml b/environment.yml index 30fa7c0dea696..2e0228a15272e 100644 --- a/environment.yml +++ b/environment.yml @@ -99,7 +99,7 @@ dependencies: - xlwt - odfpy - - fastparquet>=0.3.2, <=0.5.0 # pandas.read_parquet, DataFrame.to_parquet + - fastparquet>=0.3.2 # pandas.read_parquet, DataFrame.to_parquet - pyarrow>=0.15.0 # pandas.read_parquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather - python-snappy # required by pyarrow diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 5ad014a334c27..34d5edee06791 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -327,9 +327,14 @@ def read( if is_fsspec_url(path): fsspec = import_optional_dependency("fsspec") - parquet_kwargs["open_with"] = lambda path, _: fsspec.open( - path, "rb", **(storage_options or {}) - ).open() + if Version(self.api.__version__) > Version("0.6.1"): + parquet_kwargs["fs"] = fsspec.open( + path, "rb", **(storage_options or {}) + ).fs + else: + parquet_kwargs["open_with"] = lambda path, _: fsspec.open( + path, "rb", **(storage_options or {}) + ).open() elif isinstance(path, str) and not os.path.isdir(path): # use get_handle only when we are very certain that it is not a directory # fsspec resources can also point to directories diff --git 
a/requirements-dev.txt b/requirements-dev.txt index 3e421c7715566..ea7ca43742934 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -64,7 +64,7 @@ xlrd xlsxwriter xlwt odfpy -fastparquet>=0.3.2, <=0.5.0 +fastparquet>=0.3.2 pyarrow>=0.15.0 python-snappy pyqt5>=5.9.2
closes #41366 Reverts pandas-dev/pandas#41370 Looks like fastparquet released a new version.
https://api.github.com/repos/pandas-dev/pandas/pulls/41443
2021-05-12T20:42:36Z
2021-05-14T17:13:47Z
2021-05-14T17:13:47Z
2021-06-01T14:56:11Z
DOC: freeze old whatsnew #6856
diff --git a/doc/source/whatsnew/v0.10.0.rst b/doc/source/whatsnew/v0.10.0.rst index aa2749c85a232..2f5162dcd4b67 100644 --- a/doc/source/whatsnew/v0.10.0.rst +++ b/doc/source/whatsnew/v0.10.0.rst @@ -45,8 +45,7 @@ want to broadcast, we are phasing out this special case (Zen of Python: *Special cases aren't special enough to break the rules*). Here's what I'm talking about: -.. ipython:: python - :okwarning: +.. code-block:: python import pandas as pd @@ -180,7 +179,7 @@ labeled the aggregated group with the end of the interval: the next day). DataFrame constructor with no columns specified. The v0.9.0 behavior (names ``X0``, ``X1``, ...) can be reproduced by specifying ``prefix='X'``: -.. ipython:: python +.. code-block:: python import io @@ -197,7 +196,7 @@ labeled the aggregated group with the end of the interval: the next day). though this can be controlled by new ``true_values`` and ``false_values`` arguments: -.. ipython:: python +.. code-block:: python print(data) pd.read_csv(io.StringIO(data)) @@ -210,7 +209,7 @@ labeled the aggregated group with the end of the interval: the next day). - Calling ``fillna`` on Series or DataFrame with no arguments is no longer valid code. You must either specify a fill value or an interpolation method: -.. ipython:: python +.. code-block:: python s = pd.Series([np.nan, 1.0, 2.0, np.nan, 4]) s @@ -219,7 +218,7 @@ labeled the aggregated group with the end of the interval: the next day). Convenience methods ``ffill`` and ``bfill`` have been added: -.. ipython:: python +.. code-block:: python s.ffill() @@ -228,7 +227,7 @@ Convenience methods ``ffill`` and ``bfill`` have been added: function, that is itself a series, and possibly upcast the result to a DataFrame - .. ipython:: python + .. 
code-block:: python def f(x): return pd.Series([x, x ** 2], index=["x", "x^2"]) @@ -249,7 +248,7 @@ Convenience methods ``ffill`` and ``bfill`` have been added: Note: ``set_printoptions``/ ``reset_printoptions`` are now deprecated (but functioning), the print options now live under "display.XYZ". For example: - .. ipython:: python + .. code-block:: python pd.get_option("display.max_rows") @@ -264,7 +263,7 @@ Wide DataFrame printing Instead of printing the summary information, pandas now splits the string representation across multiple rows by default: -.. ipython:: python +.. code-block:: python wide_frame = pd.DataFrame(np.random.randn(5, 16)) @@ -273,14 +272,13 @@ representation across multiple rows by default: The old behavior of printing out summary information can be achieved via the 'expand_frame_repr' print option: -.. ipython:: python +.. code-block:: python pd.set_option("expand_frame_repr", False) wide_frame -.. ipython:: python - :suppress: +.. code-block:: python pd.reset_option("expand_frame_repr") diff --git a/doc/source/whatsnew/v0.10.1.rst b/doc/source/whatsnew/v0.10.1.rst index 611ac2021fcec..1bddfb8a85fd8 100644 --- a/doc/source/whatsnew/v0.10.1.rst +++ b/doc/source/whatsnew/v0.10.1.rst @@ -39,9 +39,7 @@ You may need to upgrade your existing data files. Please visit the **compatibility** section in the main docs. -.. ipython:: python - :suppress: - :okexcept: +.. code-block:: python import os @@ -50,7 +48,7 @@ You may need to upgrade your existing data files. Please visit the You can designate (and index) certain columns that you want to be able to perform queries on a table, by passing a list to ``data_columns`` -.. ipython:: python +.. code-block:: python store = pd.HDFStore("store.h5") df = pd.DataFrame( @@ -82,7 +80,7 @@ Retrieving unique values in an indexable or data column. You can now store ``datetime64`` in data columns -.. ipython:: python +.. 
code-block:: python df_mixed = df.copy() df_mixed["datetime64"] = pd.Timestamp("20010102") @@ -97,7 +95,7 @@ You can pass ``columns`` keyword to select to filter a list of the return columns, this is equivalent to passing a ``Term('columns',list_of_columns_to_filter)`` -.. ipython:: python +.. code-block:: python store.select("df", columns=["A", "B"]) @@ -160,7 +158,7 @@ Multi-table creation via ``append_to_multiple`` and selection via ``select_as_multiple`` can create/select from multiple tables and return a combined result, by using ``where`` on a selector table. -.. ipython:: python +.. code-block:: python df_mt = pd.DataFrame( np.random.randn(8, 6), @@ -184,8 +182,7 @@ combined result, by using ``where`` on a selector table. ["df1_mt", "df2_mt"], where=["A>0", "B>0"], selector="df1_mt" ) -.. ipython:: python - :suppress: +.. code-block:: python store.close() os.remove("store.h5") diff --git a/doc/source/whatsnew/v0.11.0.rst b/doc/source/whatsnew/v0.11.0.rst index 0fba784e36661..450ec73b411d9 100644 --- a/doc/source/whatsnew/v0.11.0.rst +++ b/doc/source/whatsnew/v0.11.0.rst @@ -72,7 +72,7 @@ Dtypes Numeric dtypes will propagate and can coexist in DataFrames. If a dtype is passed (either directly via the ``dtype`` keyword, a passed ``ndarray``, or a passed ``Series``, then it will be preserved in DataFrame operations. Furthermore, different numeric dtypes will **NOT** be combined. The following example will give you a taste. -.. ipython:: python +.. code-block:: python df1 = pd.DataFrame(np.random.randn(8, 1), columns=['A'], dtype='float32') df1 @@ -93,13 +93,13 @@ Dtype conversion This is lower-common-denominator upcasting, meaning you get the dtype which can accommodate all of the types -.. ipython:: python +.. code-block:: python df3.values.dtype Conversion -.. ipython:: python +.. code-block:: python df3.astype('float32').dtypes @@ -288,7 +288,7 @@ in addition to the traditional ``NaT``, or not-a-time. 
This allows convenient na Furthermore ``datetime64[ns]`` columns are created by default, when passed datetimelike objects (*this change was introduced in 0.10.1*) (:issue:`2809`, :issue:`2810`) -.. ipython:: python +.. code-block:: python df = pd.DataFrame(np.random.randn(6, 2), pd.date_range('20010102', periods=6), columns=['A', ' B']) @@ -304,7 +304,7 @@ Furthermore ``datetime64[ns]`` columns are created by default, when passed datet Astype conversion on ``datetime64[ns]`` to ``object``, implicitly converts ``NaT`` to ``np.nan`` -.. ipython:: python +.. code-block:: python import datetime s = pd.Series([datetime.datetime(2001, 1, 2, 0, 0) for i in range(3)]) @@ -344,15 +344,13 @@ Enhancements - support ``read_hdf/to_hdf`` API similar to ``read_csv/to_csv`` - .. ipython:: python + .. code-block:: python df = pd.DataFrame({'A': range(5), 'B': range(5)}) df.to_hdf('store.h5', 'table', append=True) pd.read_hdf('store.h5', 'table', where=['index > 2']) - .. ipython:: python - :suppress: - :okexcept: + .. code-block:: python import os @@ -367,8 +365,7 @@ Enhancements - You can now select with a string from a DataFrame with a datelike index, in a similar way to a Series (:issue:`3070`) - .. ipython:: python - :okwarning: + .. code-block:: python idx = pd.date_range("2001-10-1", periods=5, freq='M') ts = pd.Series(np.random.rand(len(idx)), index=idx) diff --git a/doc/source/whatsnew/v0.12.0.rst b/doc/source/whatsnew/v0.12.0.rst index c12adb2f1334f..126d250e9718f 100644 --- a/doc/source/whatsnew/v0.12.0.rst +++ b/doc/source/whatsnew/v0.12.0.rst @@ -45,7 +45,7 @@ API changes ``np.nan`` or ``np.inf`` as appropriate (:issue:`3590`). This correct a numpy bug that treats ``integer`` and ``float`` dtypes differently. - .. ipython:: python + .. code-block:: python p = pd.DataFrame({"first": [4, 5, 8], "second": [0, 0, 3]}) p % 0 @@ -93,7 +93,7 @@ API changes This case is rarely used, and there are plenty of alternatives. 
This preserves the ``iloc`` API to be *purely* positional based. - .. ipython:: python + .. code-block:: python df = pd.DataFrame(range(5), index=list("ABCDE"), columns=["a"]) mask = df.a % 2 == 0 @@ -200,8 +200,7 @@ IO enhancements You can use ``pd.read_html()`` to read the output from ``DataFrame.to_html()`` like so - .. ipython:: python - :okwarning: + .. code-block:: python df = pd.DataFrame({"a": range(3), "b": list("abc")}) print(df) @@ -248,7 +247,7 @@ IO enhancements with ``df.to_csv(..., index=False``), then any ``names`` on the columns index will be *lost*. - .. ipython:: python + .. code-block:: python from pandas._testing import makeCustomDataframe as mkdf @@ -257,8 +256,7 @@ IO enhancements print(open("mi.csv").read()) pd.read_csv("mi.csv", header=[0, 1, 2, 3], index_col=[0, 1]) - .. ipython:: python - :suppress: + .. code-block:: python import os @@ -307,7 +305,7 @@ Other enhancements For example you can do - .. ipython:: python + .. code-block:: python df = pd.DataFrame({"a": list("ab.."), "b": [1, 2, 3, 4]}) df.replace(regex=r"\s*\.\s*", value=np.nan) @@ -317,7 +315,7 @@ Other enhancements Regular string replacement still works as expected. For example, you can do - .. ipython:: python + .. code-block:: python df.replace(".", np.nan) @@ -351,7 +349,7 @@ Other enhancements object. Suppose we want to take only elements that belong to groups with a group sum greater than 2. - .. ipython:: python + .. code-block:: python sf = pd.Series([1, 1, 2, 3, 3, 3]) sf.groupby(sf).filter(lambda x: x.sum() > 2) @@ -362,7 +360,7 @@ Other enhancements Another useful operation is filtering out elements that belong to groups with only a couple members. - .. ipython:: python + .. code-block:: python dff = pd.DataFrame({"A": np.arange(8), "B": list("aabbbbcc")}) dff.groupby("B").filter(lambda x: len(x) > 2) @@ -371,7 +369,7 @@ Other enhancements like-indexed objects where the groups that do not pass the filter are filled with NaNs. - .. ipython:: python + .. 
code-block:: python dff.groupby("B").filter(lambda x: len(x) > 2, dropna=False) @@ -398,7 +396,7 @@ Experimental features This uses the ``numpy.busdaycalendar`` API introduced in Numpy 1.7 and therefore requires Numpy 1.7.0 or newer. - .. ipython:: python + .. code-block:: python from pandas.tseries.offsets import CustomBusinessDay from datetime import datetime @@ -433,8 +431,7 @@ Bug fixes a ``Series`` with either a single character at each index of the original ``Series`` or ``NaN``. For example, - .. ipython:: python - :okwarning: + .. code-block:: python strs = "go", "bow", "joe", "slow" ds = pd.Series(strs) diff --git a/doc/source/whatsnew/v0.13.0.rst b/doc/source/whatsnew/v0.13.0.rst index 3c6b70fb21383..3dc96731cbe36 100644 --- a/doc/source/whatsnew/v0.13.0.rst +++ b/doc/source/whatsnew/v0.13.0.rst @@ -153,7 +153,7 @@ API changes Added the ``.bool()`` method to ``NDFrame`` objects to facilitate evaluating of single-element boolean Series: - .. ipython:: python + .. code-block:: python pd.Series([True]).bool() pd.Series([False]).bool() @@ -170,15 +170,14 @@ API changes - Chained assignment will now by default warn if the user is assigning to a copy. This can be changed with the option ``mode.chained_assignment``, allowed options are ``raise/warn/None``. See :ref:`the docs<indexing.view_versus_copy>`. - .. ipython:: python + .. code-block:: python dfc = pd.DataFrame({'A': ['aaa', 'bbb', 'ccc'], 'B': [1, 2, 3]}) pd.set_option('chained_assignment', 'warn') The following warning / exception will show if this is attempted. - .. ipython:: python - :okwarning: + .. code-block:: python dfc.loc[0]['A'] = 1111 @@ -192,7 +191,7 @@ API changes Here is the correct method of assignment. - .. ipython:: python + .. code-block:: python dfc.loc[0, 'A'] = 11 dfc @@ -242,14 +241,14 @@ was not contained in the index of a particular axis. (:issue:`2578`). See :ref:` In the ``Series`` case this is effectively an appending operation -.. ipython:: python +.. 
code-block:: python s = pd.Series([1, 2, 3]) s s[5] = 5. s -.. ipython:: python +.. code-block:: python dfi = pd.DataFrame(np.arange(6).reshape(3, 2), columns=['A', 'B']) @@ -257,14 +256,14 @@ In the ``Series`` case this is effectively an appending operation This would previously ``KeyError`` -.. ipython:: python +.. code-block:: python dfi.loc[:, 'C'] = dfi.loc[:, 'A'] dfi This is like an ``append`` operation. -.. ipython:: python +.. code-block:: python dfi.loc[3] = 5 dfi @@ -314,7 +313,7 @@ Float64Index API change Construction is by default for floating type values. - .. ipython:: python + .. code-block:: python index = pd.Index([1.5, 2, 3, 4.5, 5]) index @@ -323,14 +322,14 @@ Float64Index API change Scalar selection for ``[],.ix,.loc`` will always be label based. An integer will match an equal float index (e.g. ``3`` is equivalent to ``3.0``) - .. ipython:: python + .. code-block:: python s[3] s.loc[3] The only positional indexing is via ``iloc`` - .. ipython:: python + .. code-block:: python s.iloc[3] @@ -338,7 +337,7 @@ Float64Index API change Slicing is ALWAYS on the values of the index, for ``[],ix,loc`` and ALWAYS positional with ``iloc`` - .. ipython:: python + .. code-block:: python s[2:4] s.loc[2:4] @@ -346,7 +345,7 @@ Float64Index API change In float indexes, slicing using floats are allowed - .. ipython:: python + .. code-block:: python s[2.1:4.6] s.loc[2.1:4.6] @@ -374,7 +373,7 @@ HDFStore API changes - Query Format Changes. A much more string-like query format is now supported. See :ref:`the docs<io.hdf5-query>`. - .. ipython:: python + .. code-block:: python path = 'test.h5' dfq = pd.DataFrame(np.random.randn(10, 4), @@ -384,20 +383,19 @@ HDFStore API changes Use boolean expressions, with in-line function evaluation. - .. ipython:: python + .. code-block:: python pd.read_hdf(path, 'dfq', where="index>Timestamp('20130104') & columns=['A', 'B']") Use an inline column reference - .. ipython:: python + .. 
code-block:: python pd.read_hdf(path, 'dfq', where="A>0 or C>0") - .. ipython:: python - :suppress: + .. code-block:: python import os os.remove(path) @@ -406,7 +404,7 @@ HDFStore API changes the same defaults as prior < 0.13.0 remain, e.g. ``put`` implies ``fixed`` format and ``append`` implies ``table`` format. This default format can be set as an option by setting ``io.hdf.default_format``. - .. ipython:: python + .. code-block:: python path = 'test.h5' df = pd.DataFrame(np.random.randn(10, 2)) @@ -416,8 +414,7 @@ HDFStore API changes with pd.HDFStore(path) as store: print(store) - .. ipython:: python - :suppress: + .. code-block:: python import os os.remove(path) @@ -435,7 +432,7 @@ HDFStore API changes until they themselves are closed. Performing an action on a closed file will raise ``ClosedFileError`` - .. ipython:: python + .. code-block:: python path = 'test.h5' df = pd.DataFrame(np.random.randn(10, 2)) @@ -451,8 +448,7 @@ HDFStore API changes store2.close() store2 - .. ipython:: python - :suppress: + .. code-block:: python import os os.remove(path) @@ -500,7 +496,7 @@ Enhancements - ``NaN`` handing in get_dummies (:issue:`4446`) with ``dummy_na`` - .. ipython:: python + .. code-block:: python # previously, nan was erroneously counted as 2 here # now it is not counted at all @@ -519,7 +515,7 @@ Enhancements Using the new top-level ``to_timedelta``, you can convert a scalar or array from the standard timedelta format (produced by ``to_csv``) into a timedelta type (``np.timedelta64`` in ``nanoseconds``). - .. ipython:: python + .. code-block:: python pd.to_timedelta('1 days 06:05:01.00003') pd.to_timedelta('15.5us') @@ -531,7 +527,7 @@ Enhancements ``timedelta64[ns]`` object, or astyped to yield a ``float64`` dtyped Series. This is frequency conversion. See :ref:`the docs<timedeltas.timedeltas_convert>` for the docs. - .. ipython:: python + .. 
code-block:: python import datetime td = pd.Series(pd.date_range('20130101', periods=4)) - pd.Series( @@ -550,28 +546,28 @@ Enhancements Dividing or multiplying a ``timedelta64[ns]`` Series by an integer or integer Series - .. ipython:: python + .. code-block:: python td * -1 td * pd.Series([1, 2, 3, 4]) Absolute ``DateOffset`` objects can act equivalently to ``timedeltas`` - .. ipython:: python + .. code-block:: python from pandas import offsets td + offsets.Minute(5) + offsets.Milli(5) Fillna is now supported for timedeltas - .. ipython:: python + .. code-block:: python td.fillna(pd.Timedelta(0)) td.fillna(datetime.timedelta(days=1, seconds=5)) You can do numeric reduction operations on timedeltas. - .. ipython:: python + .. code-block:: python td.mean() td.quantile(.1) @@ -586,8 +582,7 @@ Enhancements - The new vectorized string method ``extract`` return regular expression matches more conveniently. - .. ipython:: python - :okwarning: + .. code-block:: python pd.Series(['a1', 'b2', 'c3']).str.extract('[ab](\\d)') @@ -595,8 +590,7 @@ Enhancements with more than one group returns a DataFrame with one column per group. - .. ipython:: python - :okwarning: + .. code-block:: python pd.Series(['a1', 'b2', 'c3']).str.extract('([ab])(\\d)') @@ -607,16 +601,14 @@ Enhancements Named groups like - .. ipython:: python - :okwarning: + .. code-block:: python pd.Series(['a1', 'b2', 'c3']).str.extract( '(?P<letter>[ab])(?P<digit>\\d)') and optional groups can also be used. - .. ipython:: python - :okwarning: + .. code-block:: python pd.Series(['a1', 'b2', '3']).str.extract( '(?P<letter>[ab])?(?P<digit>\\d)') @@ -636,19 +628,19 @@ Enhancements Period conversions in the range of seconds and below were reworked and extended up to nanoseconds. Periods in the nanosecond range are now available. - .. ipython:: python + .. code-block:: python pd.date_range('2013-01-01', periods=5, freq='5N') or with frequency as offset - .. ipython:: python + .. 
code-block:: python pd.date_range('2013-01-01', periods=5, freq=pd.offsets.Nano(5)) Timestamps can be modified in the nanosecond range - .. ipython:: python + .. code-block:: python t = pd.Timestamp('20130101 09:01:02') t + pd.tseries.offsets.Nano(123) @@ -657,7 +649,7 @@ Enhancements To get the rows where any of the conditions are met: - .. ipython:: python + .. code-block:: python dfi = pd.DataFrame({'A': [1, 2, 3, 4], 'B': ['a', 'b', 'f', 'n']}) dfi @@ -696,7 +688,7 @@ Enhancements - DataFrame has a new ``interpolate`` method, similar to Series (:issue:`4434`, :issue:`1892`) - .. ipython:: python + .. code-block:: python df = pd.DataFrame({'A': [1, 2.1, np.nan, 4.7, 5.6, 6.8], 'B': [.25, np.nan, np.nan, 4, 12.2, 14.4]}) @@ -711,14 +703,14 @@ Enhancements Interpolate now also accepts a ``limit`` keyword argument. This works similar to ``fillna``'s limit: - .. ipython:: python + .. code-block:: python ser = pd.Series([1, 3, np.nan, np.nan, np.nan, 11]) ser.interpolate(limit=2) - Added ``wide_to_long`` panel data convenience function. See :ref:`the docs<reshaping.melt>`. - .. ipython:: python + .. code-block:: python np.random.seed(123) df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"}, @@ -750,18 +742,18 @@ Experimental ``numexpr`` behind the scenes. This results in large speedups for complicated expressions involving large DataFrames/Series. For example, - .. ipython:: python + .. code-block:: python nrows, ncols = 20000, 100 df1, df2, df3, df4 = [pd.DataFrame(np.random.randn(nrows, ncols)) for _ in range(4)] - .. ipython:: python + .. code-block:: python # eval with NumExpr backend %timeit pd.eval('df1 + df2 + df3 + df4') - .. ipython:: python + .. code-block:: python # pure Python evaluation %timeit df1 + df2 + df3 + df4 @@ -772,8 +764,7 @@ Experimental ``DataFrame.eval`` method that evaluates an expression in the context of the ``DataFrame``. For example, - .. ipython:: python - :suppress: + .. 
code-block:: python try: del a # noqa: F821 @@ -785,7 +776,7 @@ Experimental except NameError: pass - .. ipython:: python + .. code-block:: python df = pd.DataFrame(np.random.randn(10, 2), columns=['a', 'b']) df.eval('a + b') @@ -794,8 +785,7 @@ Experimental you to select elements of a ``DataFrame`` using a natural query syntax nearly identical to Python syntax. For example, - .. ipython:: python - :suppress: + .. code-block:: python try: del a # noqa: F821 @@ -812,7 +802,7 @@ Experimental except NameError: pass - .. ipython:: python + .. code-block:: python n = 20 df = pd.DataFrame(np.random.randint(n, size=(n, 3)), columns=['a', 'b', 'c']) @@ -845,9 +835,7 @@ Experimental for o in pd.read_msgpack('foo.msg', iterator=True): print(o) - .. ipython:: python - :suppress: - :okexcept: + .. code-block:: python os.remove('foo.msg') @@ -931,13 +919,13 @@ to unify methods and behaviors. Series formerly subclassed directly from as an argument. This seems only to affect ``np.ones_like``, ``np.empty_like``, ``np.diff`` and ``np.where``. These now return ``ndarrays``. - .. ipython:: python + .. code-block:: python s = pd.Series([1, 2, 3, 4]) Numpy Usage - .. ipython:: python + .. code-block:: python np.ones_like(s) np.diff(s) @@ -945,7 +933,7 @@ to unify methods and behaviors. Series formerly subclassed directly from Pandonic Usage - .. ipython:: python + .. code-block:: python pd.Series(1, index=s.index) s.diff() @@ -1021,7 +1009,7 @@ to unify methods and behaviors. Series formerly subclassed directly from - Refactor of ``_get_numeric_data/_get_bool_data`` to core/generic.py, allowing Series/Panel functionality - ``Series`` (for index) / ``Panel`` (for items) now allow attribute access to its elements (:issue:`1903`) - .. ipython:: python + .. 
code-block:: python s = pd.Series([1, 2, 3], index=list('abc')) s.b diff --git a/doc/source/whatsnew/v0.13.1.rst b/doc/source/whatsnew/v0.13.1.rst index 249b9555b7fd4..4ccc9318f9e3f 100644 --- a/doc/source/whatsnew/v0.13.1.rst +++ b/doc/source/whatsnew/v0.13.1.rst @@ -29,7 +29,7 @@ Highlights include: This would previously segfault: - .. ipython:: python + .. code-block:: python df = pd.DataFrame({"A": np.array(["foo", "bar", "bah", "foo", "bar"])}) df["A"].iloc[0] = np.nan @@ -37,7 +37,7 @@ Highlights include: The recommended way to do this type of assignment is: - .. ipython:: python + .. code-block:: python df = pd.DataFrame({"A": np.array(["foo", "bar", "bah", "foo", "bar"])}) df.loc[0, "A"] = np.nan @@ -50,7 +50,7 @@ Output formatting enhancements - df.info() now honors the option ``max_info_rows``, to disable null counts for large frames (:issue:`5974`) - .. ipython:: python + .. code-block:: python max_info_rows = pd.get_option("max_info_rows") @@ -63,13 +63,13 @@ Output formatting enhancements ) df.iloc[3:6, [0, 2]] = np.nan - .. ipython:: python + .. code-block:: python # set to not display the null counts pd.set_option("max_info_rows", 0) df.info() - .. ipython:: python + .. code-block:: python # this is the default (same as in 0.13.0) pd.set_option("max_info_rows", max_info_rows) @@ -77,7 +77,7 @@ Output formatting enhancements - Add ``show_dimensions`` display option for the new DataFrame repr to control whether the dimensions print. - .. ipython:: python + .. code-block:: python df = pd.DataFrame([[1, 2], [3, 4]]) pd.set_option("show_dimensions", False) @@ -99,7 +99,7 @@ Output formatting enhancements Now the output looks like: - .. ipython:: python + .. code-block:: python df = pd.DataFrame( [pd.Timestamp("20010101"), pd.Timestamp("20040601")], columns=["age"] @@ -117,7 +117,7 @@ API changes - Added ``Series.str.get_dummies`` vectorized string method (:issue:`6021`), to extract dummy/indicator variables for separated string columns: - .. 
ipython:: python + .. code-block:: python s = pd.Series(["a", "a|b", np.nan, "a|c"]) s.str.get_dummies(sep="|") @@ -218,7 +218,7 @@ Enhancements - ``MultiIndex.from_product`` convenience function for creating a MultiIndex from the cartesian product of a set of iterables (:issue:`6055`): - .. ipython:: python + .. code-block:: python shades = ["light", "dark"] colors = ["red", "green", "blue"] diff --git a/doc/source/whatsnew/v0.14.0.rst b/doc/source/whatsnew/v0.14.0.rst index b59938a9b9c9b..278a13f42e559 100644 --- a/doc/source/whatsnew/v0.14.0.rst +++ b/doc/source/whatsnew/v0.14.0.rst @@ -57,7 +57,7 @@ API changes values. A single indexer that is out-of-bounds and drops the dimensions of the object will still raise ``IndexError`` (:issue:`6296`, :issue:`6299`). This could result in an empty axis (e.g. an empty DataFrame being returned) - .. ipython:: python + .. code-block:: python dfl = pd.DataFrame(np.random.randn(5, 2), columns=list('AB')) dfl @@ -113,7 +113,7 @@ API changes as :meth:`Index.delete` and :meth:`Index.drop` methods will no longer change the type of the resulting index (:issue:`6440`, :issue:`7040`) - .. ipython:: python + .. code-block:: python i = pd.Index([1, 2, 3, 'a', 'b', 'c']) i[[0, 1, 2]] @@ -122,15 +122,14 @@ API changes Previously, the above operation would return ``Int64Index``. If you'd like to do this manually, use :meth:`Index.astype` - .. ipython:: python + .. code-block:: python i[[0, 1, 2]].astype(np.int_) - ``set_index`` no longer converts MultiIndexes to an Index of tuples. For example, the old behavior returned an Index in this case (:issue:`6459`): - .. ipython:: python - :suppress: + .. code-block:: python np.random.seed(1234) from itertools import product @@ -140,7 +139,7 @@ API changes tuple_ind = pd.Index(tuples, tupleize_cols=False) df_multi.index - .. ipython:: python + .. 
code-block:: python # Old behavior, casted MultiIndex to an Index tuple_ind @@ -152,7 +151,7 @@ API changes This also applies when passing multiple indices to ``set_index``: - .. ipython:: python + .. code-block:: python @suppress df_multi.index = tuple_ind @@ -272,7 +271,7 @@ Display changes The default for ``display.show_dimensions`` will now be ``truncate``. This is consistent with how Series display length. - .. ipython:: python + .. code-block:: python dfd = pd.DataFrame(np.arange(25).reshape(-1, 5), index=[0, 1, 2, 3, 4], @@ -328,7 +327,7 @@ More consistent behavior for some groupby methods: - groupby ``head`` and ``tail`` now act more like ``filter`` rather than an aggregation: - .. ipython:: python + .. code-block:: python df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B']) g = df.groupby('A') @@ -338,7 +337,7 @@ More consistent behavior for some groupby methods: - groupby head and tail respect column selection: - .. ipython:: python + .. code-block:: python g[['B']].head(1) @@ -347,7 +346,7 @@ More consistent behavior for some groupby methods: Reducing - .. ipython:: python + .. code-block:: python df = pd.DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B']) g = df.groupby('A') @@ -361,7 +360,7 @@ More consistent behavior for some groupby methods: Filtering - .. ipython:: python + .. code-block:: python gf = df.groupby('A', as_index=False) gf.nth(0) @@ -370,7 +369,7 @@ More consistent behavior for some groupby methods: - groupby will now not return the grouped column for non-cython functions (:issue:`5610`, :issue:`5614`, :issue:`6732`), as its already the index - .. ipython:: python + .. code-block:: python df = pd.DataFrame([[1, np.nan], [1, 4], [5, 6], [5, 8]], columns=['A', 'B']) g = df.groupby('A') @@ -379,7 +378,7 @@ More consistent behavior for some groupby methods: - passing ``as_index`` will leave the grouped column in-place (this is not change in 0.14.0) - .. ipython:: python + .. 
code-block:: python df = pd.DataFrame([[1, np.nan], [1, 4], [5, 6], [5, 8]], columns=['A', 'B']) g = df.groupby('A', as_index=False) @@ -426,7 +425,7 @@ To connect with SQLAlchemy you use the :func:`create_engine` function to create object from database URI. You only need to create the engine once per database you are connecting to. For an in-memory sqlite database: -.. ipython:: python +.. code-block:: python from sqlalchemy import create_engine # Create your connection. @@ -434,20 +433,20 @@ connecting to. For an in-memory sqlite database: This ``engine`` can then be used to write or read data to/from this database: -.. ipython:: python +.. code-block:: python df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'c']}) df.to_sql('db_table', engine, index=False) You can read data from a database by specifying the table name: -.. ipython:: python +.. code-block:: python pd.read_sql_table('db_table', engine) or by specifying a sql query: -.. ipython:: python +.. code-block:: python pd.read_sql_query('SELECT * FROM db_table', engine) @@ -512,7 +511,7 @@ See also issues (:issue:`6134`, :issue:`4036`, :issue:`3057`, :issue:`2598`, :is You will need to make sure that the selection axes are fully lexsorted! -.. ipython:: python +.. code-block:: python def mklbl(prefix, n): return ["%s%s" % (prefix, i) for i in range(n)] @@ -532,13 +531,13 @@ See also issues (:issue:`6134`, :issue:`4036`, :issue:`3057`, :issue:`2598`, :is Basic MultiIndex slicing using slices, lists, and labels. -.. ipython:: python +.. code-block:: python df.loc[(slice('A1', 'A3'), slice(None), ['C1', 'C3']), :] You can use a ``pd.IndexSlice`` to shortcut the creation of these slices -.. ipython:: python +.. code-block:: python idx = pd.IndexSlice df.loc[idx[:, :, ['C1', 'C3']], idx[:, 'foo']] @@ -546,14 +545,14 @@ You can use a ``pd.IndexSlice`` to shortcut the creation of these slices It is possible to perform quite complicated selections using this method on multiple axes at the same time. -.. 
ipython:: python +.. code-block:: python df.loc['A1', (slice(None), 'foo')] df.loc[idx[:, :, ['C1', 'C3']], idx[:, 'foo']] Using a boolean indexer you can provide selection related to the *values*. -.. ipython:: python +.. code-block:: python mask = df[('a', 'foo')] > 200 df.loc[idx[mask, :, ['C1', 'C3']], idx[:, 'foo']] @@ -561,13 +560,13 @@ Using a boolean indexer you can provide selection related to the *values*. You can also specify the ``axis`` argument to ``.loc`` to interpret the passed slicers on a single axis. -.. ipython:: python +.. code-block:: python df.loc(axis=0)[:, :, ['C1', 'C3']] Furthermore you can *set* the values using these methods -.. ipython:: python +.. code-block:: python df2 = df.copy() df2.loc(axis=0)[:, :, ['C1', 'C3']] = -10 @@ -575,7 +574,7 @@ Furthermore you can *set* the values using these methods You can use a right-hand-side of an alignable object as well. -.. ipython:: python +.. code-block:: python df2 = df.copy() df2.loc[idx[:, :, ['C1', 'C3']], :] = df2 * 1000 @@ -744,7 +743,7 @@ Enhancements - DataFrame and Series will create a MultiIndex object if passed a tuples dict, See :ref:`the docs<basics.dataframe.from_dict_of_tuples>` (:issue:`3323`) - .. ipython:: python + .. code-block:: python pd.Series({('a', 'b'): 1, ('a', 'a'): 0, ('a', 'c'): 2, ('b', 'a'): 3, ('b', 'b'): 4}) @@ -763,7 +762,7 @@ Enhancements See :ref:`the docs<merging.join_on_mi>`. Joining MultiIndex DataFrames on both the left and right is not yet supported ATM. - .. ipython:: python + .. code-block:: python household = pd.DataFrame({'household_id': [1, 2, 3], 'male': [0, 1, 0], @@ -822,7 +821,7 @@ Enhancements - :meth:`~DataFrame.describe` now accepts an array of percentiles to include in the summary statistics (:issue:`4196`) - ``pivot_table`` can now accept ``Grouper`` by ``index`` and ``columns`` keywords (:issue:`6913`) - .. ipython:: python + .. 
code-block:: python import datetime df = pd.DataFrame({ @@ -853,7 +852,7 @@ Enhancements - ``PeriodIndex`` fully supports partial string indexing like ``DatetimeIndex`` (:issue:`7043`) - .. ipython:: python + .. code-block:: python prng = pd.period_range('2013-01-01 09:00', periods=100, freq='H') ps = pd.Series(np.random.randn(len(prng)), index=prng) diff --git a/doc/source/whatsnew/v0.14.1.rst b/doc/source/whatsnew/v0.14.1.rst index a8f8955c3c1b9..eb686bf4bee71 100644 --- a/doc/source/whatsnew/v0.14.1.rst +++ b/doc/source/whatsnew/v0.14.1.rst @@ -64,14 +64,13 @@ API changes Starting from 0.14.1 all offsets preserve time by default. The old behaviour can be obtained with ``normalize=True`` - .. ipython:: python - :suppress: + .. code-block:: python import pandas.tseries.offsets as offsets d = pd.Timestamp("2014-01-01 09:00") - .. ipython:: python + .. code-block:: python # new behaviour d + offsets.MonthEnd() @@ -122,7 +121,7 @@ Enhancements - Support for dateutil timezones, which can now be used in the same way as pytz timezones across pandas. (:issue:`4688`) - .. ipython:: python + .. code-block:: python rng = pd.date_range( "3/6/2012 00:00", periods=10, freq="D", tz="dateutil/Europe/London" diff --git a/doc/source/whatsnew/v0.15.0.rst b/doc/source/whatsnew/v0.15.0.rst index fc2b070df4392..2395c0cc19999 100644 --- a/doc/source/whatsnew/v0.15.0.rst +++ b/doc/source/whatsnew/v0.15.0.rst @@ -69,8 +69,7 @@ methods to manipulate. Thanks to Jan Schulz for much of this API/implementation. For full docs, see the :ref:`categorical introduction <categorical>` and the :ref:`API documentation <api.arrays.categorical>`. -.. ipython:: python - :okwarning: +.. code-block:: python df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']}) @@ -146,7 +145,7 @@ This type is very similar to how ``Timestamp`` works for ``datetimes``. It is a Construct a scalar -.. ipython:: python +.. 
code-block:: python pd.Timedelta('1 days 06:05:01.00003') pd.Timedelta('15.5us') @@ -161,7 +160,7 @@ Construct a scalar Access fields for a ``Timedelta`` -.. ipython:: python +.. code-block:: python td = pd.Timedelta('1 hour 3m 15.5us') td.seconds @@ -170,12 +169,11 @@ Access fields for a ``Timedelta`` Construct a ``TimedeltaIndex`` -.. ipython:: python - :suppress: +.. code-block:: python import datetime -.. ipython:: python +.. code-block:: python pd.TimedeltaIndex(['1 days', '1 days, 00:00:05', np.timedelta64(2, 'D'), @@ -183,14 +181,14 @@ Construct a ``TimedeltaIndex`` Constructing a ``TimedeltaIndex`` with a regular range -.. ipython:: python +.. code-block:: python pd.timedelta_range('1 days', periods=5, freq='D') pd.timedelta_range(start='1 days', end='2 days', freq='30T') You can now use a ``TimedeltaIndex`` as the index of a pandas object -.. ipython:: python +.. code-block:: python s = pd.Series(np.arange(5), index=pd.timedelta_range('1 days', periods=5, freq='s')) @@ -198,14 +196,14 @@ You can now use a ``TimedeltaIndex`` as the index of a pandas object You can select with partial string selections -.. ipython:: python +.. code-block:: python s['1 day 00:00:02'] s['1 day':'1 day 00:00:02'] Finally, the combination of ``TimedeltaIndex`` with ``DatetimeIndex`` allow certain combination operations that are ``NaT`` preserving: -.. ipython:: python +.. code-block:: python tdi = pd.TimedeltaIndex(['1 days', pd.NaT, '2 days']) tdi.tolist() @@ -227,7 +225,7 @@ Implemented methods to find memory usage of a DataFrame. See the :ref:`FAQ <df-m A new display option ``display.memory_usage`` (see :ref:`options`) sets the default behavior of the ``memory_usage`` argument in the ``df.info()`` method. By default ``display.memory_usage`` is ``True``. -.. ipython:: python +.. 
code-block:: python dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]', 'complex128', 'object', 'bool'] @@ -240,7 +238,7 @@ A new display option ``display.memory_usage`` (see :ref:`options`) sets the defa Additionally :meth:`~pandas.DataFrame.memory_usage` is an available method for a dataframe object which returns the memory usage of each column. -.. ipython:: python +.. code-block:: python df.memory_usage(index=True) @@ -253,7 +251,7 @@ Series.dt accessor ``Series`` has gained an accessor to succinctly return datetime like properties for the *values* of the Series, if its a datetime/period like Series. (:issue:`7207`) This will return a Series, indexed like the existing Series. See the :ref:`docs <basics.dt_accessors>` -.. ipython:: python +.. code-block:: python # datetime s = pd.Series(pd.date_range('20130101 09:10:12', periods=4)) @@ -265,13 +263,13 @@ This will return a Series, indexed like the existing Series. See the :ref:`docs This enables nice expressions like this: -.. ipython:: python +.. code-block:: python s[s.dt.day == 2] You can easily produce tz aware transformations: -.. ipython:: python +.. code-block:: python stz = s.dt.tz_localize('US/Eastern') stz @@ -279,13 +277,13 @@ You can easily produce tz aware transformations: You can also chain these types of operations: -.. ipython:: python +.. code-block:: python s.dt.tz_localize('UTC').dt.tz_convert('US/Eastern') The ``.dt`` accessor works for period and timedelta dtypes. -.. ipython:: python +.. code-block:: python # period s = pd.Series(pd.period_range('20130101', periods=4, freq='D')) @@ -293,7 +291,7 @@ The ``.dt`` accessor works for period and timedelta dtypes. s.dt.year s.dt.day -.. ipython:: python +.. 
code-block:: python # timedelta s = pd.Series(pd.timedelta_range('1 day 00:00:05', periods=4, freq='s')) @@ -311,7 +309,7 @@ Timezone handling improvements - ``tz_localize(None)`` for tz-aware ``Timestamp`` and ``DatetimeIndex`` now removes timezone holding local time, previously this resulted in ``Exception`` or ``TypeError`` (:issue:`7812`) - .. ipython:: python + .. code-block:: python ts = pd.Timestamp('2014-08-01 09:00', tz='US/Eastern') ts @@ -347,7 +345,7 @@ Rolling/expanding moments improvements Prior to 0.15.0 - .. ipython:: python + .. code-block:: python s = pd.Series([10, 11, 12, 13]) @@ -407,7 +405,7 @@ Rolling/expanding moments improvements the calculated weighted means (e.g. 'triang', 'gaussian') are distributed about the same means as those calculated without weighting (i.e. 'boxcar'). See :ref:`the note on normalization <window.weighted>` for further details. (:issue:`7618`) - .. ipython:: python + .. code-block:: python s = pd.Series([10.5, 8.8, 11.4, 9.7, 9.3]) @@ -456,7 +454,7 @@ Rolling/expanding moments improvements Prior behavior (note values start at index ``2``, which is ``min_periods`` after index ``0`` (the index of the first non-empty value)): - .. ipython:: python + .. code-block:: python s = pd.Series([1, None, None, None, 2, 3]) @@ -551,7 +549,7 @@ Rolling/expanding moments improvements For example, consider the following pre-0.15.0 results for ``ewmvar(..., bias=False)``, and the corresponding debiasing factors: - .. ipython:: python + .. code-block:: python s = pd.Series([1., 2., 0., 4.]) @@ -665,7 +663,7 @@ Other notable API changes: - Consistency when indexing with ``.loc`` and a list-like indexer when no values are found. - .. ipython:: python + .. code-block:: python df = pd.DataFrame([['a'], ['b']], index=[1, 2]) df @@ -727,8 +725,7 @@ Other notable API changes: Furthermore, ``.loc`` will raise If no values are found in a MultiIndex with a list-like indexer: - .. ipython:: python - :okexcept: + .. 
code-block:: python s = pd.Series(np.arange(3, dtype='int64'), index=pd.MultiIndex.from_product([['A'], @@ -747,7 +744,7 @@ Other notable API changes: dtype to object (or errored, depending on the call). It now uses ``NaN``: - .. ipython:: python + .. code-block:: python s = pd.Series([1, 2, 3]) s.loc[0] = None @@ -758,7 +755,7 @@ Other notable API changes: For object containers, we now preserve ``None`` values (previously these were converted to ``NaN`` values). - .. ipython:: python + .. code-block:: python s = pd.Series(["a", "b", "c"]) s.loc[0] = None @@ -768,7 +765,7 @@ Other notable API changes: - In prior versions, updating a pandas object inplace would not reflect in other python references to this object. (:issue:`8511`, :issue:`5104`) - .. ipython:: python + .. code-block:: python s = pd.Series([1, 2, 3]) s2 = s @@ -798,7 +795,7 @@ Other notable API changes: This is now the correct behavior - .. ipython:: python + .. code-block:: python # the original object s @@ -820,7 +817,7 @@ Other notable API changes: In prior versions this would drop the timezone, now it retains the timezone, but gives a column of ``object`` dtype: - .. ipython:: python + .. code-block:: python i = pd.date_range('1/1/2011', periods=3, freq='10s', tz='US/Eastern') i @@ -841,7 +838,7 @@ Other notable API changes: - ``SettingWithCopy`` raise/warnings (according to the option ``mode.chained_assignment``) will now be issued when setting a value on a sliced mixed-dtype DataFrame using chained-assignment. (:issue:`7845`, :issue:`7950`) - .. code-block:: python + .. code-block:: ipython In [1]: df = pd.DataFrame(np.arange(0, 9), columns=['count']) @@ -859,7 +856,7 @@ Other notable API changes: - Previously an enlargement with a mixed-dtype frame would act unlike ``.append`` which will preserve dtypes (related :issue:`2578`, :issue:`8176`): - .. ipython:: python + .. 
code-block:: python df = pd.DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"]) @@ -983,7 +980,7 @@ Other: - :func:`describe` on mixed-types DataFrames is more flexible. Type-based column filtering is now possible via the ``include``/``exclude`` arguments. See the :ref:`docs <basics.describe>` (:issue:`8164`). - .. ipython:: python + .. code-block:: python df = pd.DataFrame({'catA': ['foo', 'foo', 'bar'] * 8, 'catB': ['a', 'b', 'c', 'd'] * 6, @@ -994,7 +991,7 @@ Other: Requesting all columns is possible with the shorthand 'all' - .. ipython:: python + .. code-block:: python df.describe(include='all') @@ -1006,7 +1003,7 @@ Other: categorical columns are encoded as 0's and 1's, while other columns are left untouched. - .. ipython:: python + .. code-block:: python df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['c', 'c', 'b'], 'C': [1, 2, 3]}) @@ -1018,7 +1015,7 @@ Other: - ``pandas.tseries.holiday.Holiday`` now supports a days_of_week parameter (:issue:`7070`) - ``GroupBy.nth()`` now supports selecting multiple nth values (:issue:`7910`) - .. ipython:: python + .. code-block:: python business_dates = pd.date_range(start='4/1/2014', end='6/30/2014', freq='B') df = pd.DataFrame(1, index=business_dates, columns=['a', 'b']) @@ -1029,7 +1026,7 @@ Other: If ``Period`` freq is ``D``, ``H``, ``T``, ``S``, ``L``, ``U``, ``N``, ``Timedelta``-like can be added if the result can have same freq. Otherwise, only the same ``offsets`` can be added. - .. ipython:: python + .. code-block:: python idx = pd.period_range('2014-07-01 09:00', periods=5, freq='H') idx @@ -1055,7 +1052,7 @@ Other: - :func:`set_names`, :func:`set_labels`, and :func:`set_levels` methods now take an optional ``level`` keyword argument to all modification of specific level(s) of a MultiIndex. Additionally :func:`set_names` now accepts a scalar string value when operating on an ``Index`` or on a specific level of a ``MultiIndex`` (:issue:`7792`) - .. ipython:: python + .. 
code-block:: python idx = pd.MultiIndex.from_product([['a'], range(3), list("pqr")], names=['foo', 'bar', 'baz']) @@ -1079,7 +1076,7 @@ Other: - ``Index`` now supports ``duplicated`` and ``drop_duplicates``. (:issue:`4060`) - .. ipython:: python + .. code-block:: python idx = pd.Index([1, 2, 3, 4, 1, 2]) idx diff --git a/doc/source/whatsnew/v0.15.1.rst b/doc/source/whatsnew/v0.15.1.rst index a1d4f9d14a905..6ff3218ce9fb7 100644 --- a/doc/source/whatsnew/v0.15.1.rst +++ b/doc/source/whatsnew/v0.15.1.rst @@ -21,7 +21,7 @@ API changes - ``s.dt.hour`` and other ``.dt`` accessors will now return ``np.nan`` for missing values (rather than previously -1), (:issue:`8689`) - .. ipython:: python + .. code-block:: python s = pd.Series(pd.date_range("20130101", periods=5, freq="D")) s.iloc[2] = np.nan @@ -42,14 +42,14 @@ API changes current behavior: - .. ipython:: python + .. code-block:: python s.dt.hour - ``groupby`` with ``as_index=False`` will not add erroneous extra columns to result (:issue:`8582`): - .. ipython:: python + .. code-block:: python np.random.seed(2718281) df = pd.DataFrame(np.random.randint(0, 100, (10, 2)), columns=["jim", "joe"]) @@ -70,14 +70,14 @@ API changes current behavior: - .. ipython:: python + .. code-block:: python df.groupby(ts, as_index=False).max() - ``groupby`` will not erroneously exclude columns if the column name conflicts with the grouper name (:issue:`8112`): - .. ipython:: python + .. code-block:: python df = pd.DataFrame({"jim": range(5), "joe": range(5, 10)}) df @@ -96,14 +96,14 @@ API changes current behavior: - .. ipython:: python + .. code-block:: python gr.apply(sum) - Support for slicing with monotonic decreasing indexes, even if ``start`` or ``stop`` is not found in the index (:issue:`7860`): - .. ipython:: python + .. code-block:: python s = pd.Series(["a", "b", "c", "d"], [4, 3, 2, 1]) s @@ -117,7 +117,7 @@ API changes current behavior: - .. ipython:: python + .. 
code-block:: python s.loc[3.5:1.5] @@ -204,7 +204,7 @@ Enhancements - ``concat`` permits a wider variety of iterables of pandas objects to be passed as the first parameter (:issue:`8645`): - .. ipython:: python + .. code-block:: python from collections import deque @@ -220,13 +220,13 @@ Enhancements current behavior: - .. ipython:: python + .. code-block:: python pd.concat(deque((df1, df2))) - Represent ``MultiIndex`` labels with a dtype that utilizes memory based on the level size. In prior versions, the memory usage was a constant 8 bytes per element in each level. In addition, in prior versions, the *reported* memory usage was incorrect as it didn't show the usage for the memory occupied by the underling data array. (:issue:`8456`) - .. ipython:: python + .. code-block:: python dfi = pd.DataFrame( 1, index=pd.MultiIndex.from_product([["a"], range(1000)]), columns=["A"] @@ -246,7 +246,7 @@ Enhancements current behavior: - .. ipython:: python + .. code-block:: python dfi.memory_usage(index=True) diff --git a/doc/source/whatsnew/v0.15.2.rst b/doc/source/whatsnew/v0.15.2.rst index 2dae76dd6b461..b2f77f6e2a286 100644 --- a/doc/source/whatsnew/v0.15.2.rst +++ b/doc/source/whatsnew/v0.15.2.rst @@ -24,8 +24,7 @@ API changes - Indexing in ``MultiIndex`` beyond lex-sort depth is now supported, though a lexically sorted index will have a better performance. (:issue:`2646`) - .. ipython:: python - :okwarning: + .. code-block:: python df = pd.DataFrame({'jim':[0, 0, 1, 1], 'joe':['x', 'x', 'z', 'y'], @@ -61,7 +60,7 @@ API changes Now, only the categories that do effectively occur in the array are returned: - .. ipython:: python + .. code-block:: python cat = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c']) cat.unique() @@ -72,7 +71,7 @@ API changes - Bug in ``NDFrame``: conflicting attribute/column names now behave consistently between getting and setting. 
Previously, when both a column and attribute named ``y`` existed, ``data.y`` would return the attribute, while ``data.y = z`` would update the column (:issue:`8994`) - .. ipython:: python + .. code-block:: python data = pd.DataFrame({'x': [1, 2, 3]}) data.y = 2 @@ -94,7 +93,7 @@ API changes New behavior: - .. ipython:: python + .. code-block:: python data.y data['y'].values @@ -121,7 +120,7 @@ API changes New behavior: - .. ipython:: python + .. code-block:: python s = pd.Series(np.arange(3), ['a', 'b', 'c']) s.loc['c':'a':-1] @@ -153,8 +152,7 @@ Other enhancements: - ``Series.all`` and ``Series.any`` now support the ``level`` and ``skipna`` parameters (:issue:`8302`): - .. ipython:: python - :okwarning: + .. code-block:: python s = pd.Series([False, True, False], index=[0, 0, 1]) s.any(level=0) diff --git a/doc/source/whatsnew/v0.16.0.rst b/doc/source/whatsnew/v0.16.0.rst index 8d0d6854cbf85..4c0b10cb315ec 100644 --- a/doc/source/whatsnew/v0.16.0.rst +++ b/doc/source/whatsnew/v0.16.0.rst @@ -51,7 +51,7 @@ to be inserted (for example, a ``Series`` or NumPy array), or a function of one argument to be called on the ``DataFrame``. The new values are inserted, and the entire DataFrame (with all original and new columns) is returned. -.. ipython:: python +.. code-block:: python iris = pd.read_csv('data/iris.data') iris.head() @@ -61,7 +61,7 @@ and the entire DataFrame (with all original and new columns) is returned. Above was an example of inserting a precomputed value. We can also pass in a function to be evaluated. -.. ipython:: python +.. code-block:: python iris.assign(sepal_ratio=lambda x: (x['SepalWidth'] / x['SepalLength'])).head() @@ -70,7 +70,7 @@ The power of ``assign`` comes when used in chains of operations. For example, we can limit the DataFrame to just those with a Sepal Length greater than 5, calculate the ratio, and plot -.. ipython:: python +.. 
code-block:: python iris = pd.read_csv('data/iris.data') (iris.query('SepalLength > 5') @@ -146,7 +146,7 @@ String methods enhancements ``find()`` ``rfind()`` ``ljust()`` ``rjust()`` ``zfill()`` ============= ============= ============= =============== =============== - .. ipython:: python + .. code-block:: python s = pd.Series(['abcd', '3456', 'EFGH']) s.str.isalpha() @@ -154,14 +154,14 @@ String methods enhancements - :meth:`Series.str.pad` and :meth:`Series.str.center` now accept ``fillchar`` option to specify filling character (:issue:`9352`) - .. ipython:: python + .. code-block:: python s = pd.Series(['12', '300', '25']) s.str.pad(5, fillchar='_') - Added :meth:`Series.str.slice_replace`, which previously raised ``NotImplementedError`` (:issue:`8888`) - .. ipython:: python + .. code-block:: python s = pd.Series(['ABCD', 'EFGH', 'IJK']) s.str.slice_replace(1, 3, 'X') @@ -175,7 +175,7 @@ Other enhancements - Reindex now supports ``method='nearest'`` for frames or series with a monotonic increasing or decreasing index (:issue:`9258`): - .. ipython:: python + .. code-block:: python df = pd.DataFrame({'x': range(5)}) df.reindex([0.2, 1.8, 3.5], method='nearest') @@ -243,7 +243,7 @@ Previous behavior New behavior -.. ipython:: python +.. code-block:: python t = pd.Timedelta('1 day, 10:11:12.100123') t.days @@ -252,7 +252,7 @@ New behavior Using ``.components`` allows the full component access -.. ipython:: python +.. code-block:: python t.components t.components.seconds @@ -266,7 +266,7 @@ The behavior of a small sub-set of edge cases for using ``.loc`` have changed (: - Slicing with ``.loc`` where the start and/or stop bound is not found in the index is now allowed; this previously would raise a ``KeyError``. This makes the behavior the same as ``.ix`` in this case. This change is only for slicing, not when indexing with a single label. - .. ipython:: python + .. 
code-block:: python df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'), @@ -287,7 +287,7 @@ The behavior of a small sub-set of edge cases for using ``.loc`` have changed (: New behavior - .. ipython:: python + .. code-block:: python df.loc['2013-01-02':'2013-01-10'] s.loc[-10:3] @@ -367,7 +367,7 @@ Previous behavior New behavior -.. ipython:: python +.. code-block:: python s = pd.Series([0, 1, 2], dtype='category') s @@ -491,7 +491,7 @@ Other API changes New behavior - .. ipython:: python + .. code-block:: python p = pd.Series([0, 1]) p / 0 @@ -511,7 +511,7 @@ Other API changes Fixed behavior: - .. ipython:: python + .. code-block:: python pd.to_datetime(['2000-01-31', '2000-02-28']).asof('2000-02') @@ -674,7 +674,7 @@ Bug fixes The following would previously report a ``SettingWithCopy`` Warning. - .. ipython:: python + .. code-block:: python df1 = pd.DataFrame({'x': pd.Series(['a', 'b', 'c']), 'y': pd.Series(['d', 'e', 'f'])}) diff --git a/doc/source/whatsnew/v0.16.1.rst b/doc/source/whatsnew/v0.16.1.rst index 269854111373f..74fbc5da37839 100644 --- a/doc/source/whatsnew/v0.16.1.rst +++ b/doc/source/whatsnew/v0.16.1.rst @@ -181,7 +181,7 @@ total number or rows or columns. It also has options for sampling with or withou for passing in a column for weights for non-uniform sampling, and for setting seed values to facilitate replication. (:issue:`2419`) -.. ipython:: python +.. code-block:: python example_series = pd.Series([0, 1, 2, 3, 4, 5]) @@ -207,7 +207,7 @@ facilitate replication. (:issue:`2419`) When applied to a DataFrame, one may pass the name of a column to specify sampling weights when sampling from rows. -.. ipython:: python +.. code-block:: python df = pd.DataFrame({"col1": [9, 8, 7, 6], "weight_column": [0.5, 0.4, 0.1, 0]}) df.sample(n=3, weights="weight_column") @@ -226,7 +226,7 @@ enhancements make string operations easier and more consistent with standard pyt The ``.str`` accessor is now available for both ``Series`` and ``Index``. - .. 
ipython:: python + .. code-block:: python idx = pd.Index([" jack", "jill ", " jesse ", "frank"]) idx.str.strip() @@ -235,7 +235,7 @@ enhancements make string operations easier and more consistent with standard pyt will return a ``np.array`` instead of a boolean ``Index`` (:issue:`8875`). This enables the following expression to work naturally: - .. ipython:: python + .. code-block:: python idx = pd.Index(["a1", "a2", "b1", "b2"]) s = pd.Series(range(4), index=idx) @@ -254,7 +254,7 @@ enhancements make string operations easier and more consistent with standard pyt - ``split`` now takes ``expand`` keyword to specify whether to expand dimensionality. ``return_type`` is deprecated. (:issue:`9847`) - .. ipython:: python + .. code-block:: python s = pd.Series(["a,b", "a,c", "b,c"]) @@ -283,7 +283,7 @@ Other enhancements - ``BusinessHour`` offset is now supported, which represents business hours starting from 09:00 - 17:00 on ``BusinessDay`` by default. See :ref:`Here <timeseries.businesshour>` for details. (:issue:`7905`) - .. ipython:: python + .. code-block:: python pd.Timestamp("2014-08-01 09:00") + pd.tseries.offsets.BusinessHour() pd.Timestamp("2014-08-01 07:00") + pd.tseries.offsets.BusinessHour() @@ -297,7 +297,7 @@ Other enhancements - ``drop`` function can now accept ``errors`` keyword to suppress ``ValueError`` raised when any of label does not exist in the target data. (:issue:`6736`) - .. ipython:: python + .. code-block:: python df = pd.DataFrame(np.random.randn(3, 3), columns=["A", "B", "C"]) df.drop(["A", "X"], axis=1, errors="ignore") @@ -379,7 +379,7 @@ Previous behavior New behavior -.. ipython:: python +.. 
code-block:: python pd.set_option("display.width", 80) pd.Index(range(4), name="foo") diff --git a/doc/source/whatsnew/v0.16.2.rst b/doc/source/whatsnew/v0.16.2.rst index 37e8c64ea9ced..514be342c67c5 100644 --- a/doc/source/whatsnew/v0.16.2.rst +++ b/doc/source/whatsnew/v0.16.2.rst @@ -61,7 +61,7 @@ In the example above, the functions ``f``, ``g``, and ``h`` each expected the Da When the function you wish to apply takes its data anywhere other than the first argument, pass a tuple of ``(function, keyword)`` indicating where the DataFrame should flow. For example: -.. ipython:: python +.. code-block:: python import statsmodels.formula.api as sm diff --git a/doc/source/whatsnew/v0.17.0.rst b/doc/source/whatsnew/v0.17.0.rst index d8f39a7d6e3c0..498663b3f4130 100644 --- a/doc/source/whatsnew/v0.17.0.rst +++ b/doc/source/whatsnew/v0.17.0.rst @@ -78,7 +78,7 @@ number rows. See the :ref:`docs <timeseries.timezone_series>` for more details. The new implementation allows for having a single-timezone across all rows, with operations in a performant manner. -.. ipython:: python +.. code-block:: python df = pd.DataFrame( { @@ -90,14 +90,14 @@ The new implementation allows for having a single-timezone across all rows, with df df.dtypes -.. ipython:: python +.. code-block:: python df.B df.B.dt.tz_localize(None) This uses a new-dtype representation as well, that is very similar in look-and-feel to its numpy cousin ``datetime64[ns]`` -.. ipython:: python +.. code-block:: python df["B"].dtype type(df["B"].dtype) @@ -121,7 +121,7 @@ This uses a new-dtype representation as well, that is very similar in look-and-f New behavior: - .. ipython:: python + .. 
code-block:: python pd.date_range("20130101", periods=3, tz="US/Eastern") pd.date_range("20130101", periods=3, tz="US/Eastern").dtype @@ -161,8 +161,7 @@ The Series and DataFrame ``.plot()`` method allows for customizing :ref:`plot ty To alleviate this issue, we have added a new, optional plotting interface, which exposes each kind of plot as a method of the ``.plot`` attribute. Instead of writing ``series.plot(kind=<kind>, ...)``, you can now also use ``series.plot.<kind>(...)``: -.. ipython:: - :verbatim: +.. code-block:: ipython In [13]: df = pd.DataFrame(np.random.rand(10, 2), columns=['a', 'b']) @@ -172,8 +171,7 @@ To alleviate this issue, we have added a new, optional plotting interface, which As a result of this change, these methods are now all discoverable via tab-completion: -.. ipython:: - :verbatim: +.. code-block:: ipython In [15]: df.plot.<TAB> # noqa: E225, E999 df.plot.area df.plot.barh df.plot.density df.plot.hist df.plot.line df.plot.scatter @@ -191,14 +189,14 @@ Series.dt.strftime We are now supporting a ``Series.dt.strftime`` method for datetime-likes to generate a formatted string (:issue:`10110`). Examples: -.. ipython:: python +.. code-block:: python # DatetimeIndex s = pd.Series(pd.date_range("20130101", periods=4)) s s.dt.strftime("%Y/%m/%d") -.. ipython:: python +.. code-block:: python # PeriodIndex s = pd.Series(pd.period_range("20130101", periods=4)) @@ -212,7 +210,7 @@ Series.dt.total_seconds ``pd.Series`` of type ``timedelta64`` has new method ``.dt.total_seconds()`` returning the duration of the timedelta in seconds (:issue:`10817`) -.. ipython:: python +.. code-block:: python # TimedeltaIndex s = pd.Series(pd.timedelta_range("1 minutes", periods=4)) @@ -228,7 +226,7 @@ Period frequency enhancement A multiplied freq represents a span of corresponding length. The example below creates a period of 3 days. Addition and subtraction will shift the period by its span. -.. ipython:: python +.. 
code-block:: python p = pd.Period("2015-08-01", freq="3D") p @@ -239,7 +237,7 @@ A multiplied freq represents a span of corresponding length. The example below c You can use the multiplied freq in ``PeriodIndex`` and ``period_range``. -.. ipython:: python +.. code-block:: python idx = pd.period_range("2015-08-01", periods=4, freq="2D") idx @@ -295,7 +293,7 @@ in the ``header`` and ``index_col`` parameters (:issue:`4679`) See the :ref:`documentation <io.excel>` for more details. -.. ipython:: python +.. code-block:: python df = pd.DataFrame( [[1, 2, 3, 4], [5, 6, 7, 8]], @@ -311,8 +309,7 @@ See the :ref:`documentation <io.excel>` for more details. df = pd.read_excel("test.xlsx", header=[0, 1], index_col=[0, 1]) df -.. ipython:: python - :suppress: +.. code-block:: python import os @@ -360,14 +357,14 @@ Some East Asian countries use Unicode characters its width is corresponding to 2 - ``display.unicode.east_asian_width``: Whether to use the Unicode East Asian Width to calculate the display text width. (:issue:`2612`) - ``display.unicode.ambiguous_as_wide``: Whether to handle Unicode characters belong to Ambiguous as Wide. (:issue:`11102`) -.. ipython:: python +.. code-block:: python df = pd.DataFrame({u"国籍": ["UK", u"日本"], u"名前": ["Alice", u"しのぶ"]}) df; .. image:: ../_static/option_unicode01.png -.. ipython:: python +.. code-block:: python pd.set_option("display.unicode.east_asian_width", True) df; @@ -376,8 +373,7 @@ Some East Asian countries use Unicode characters its width is corresponding to 2 For further details, see :ref:`here <options.east_asian_width>` -.. ipython:: python - :suppress: +.. code-block:: python pd.set_option("display.unicode.east_asian_width", False) @@ -397,7 +393,7 @@ Other enhancements Merge key in both frames ``both`` =================================== ================ - .. ipython:: python + .. 
code-block:: python df1 = pd.DataFrame({"col1": [0, 1], "col_left": ["a", "b"]}) df2 = pd.DataFrame({"col1": [1, 2, 2], "col_right": [2, 2, 2]}) @@ -413,7 +409,7 @@ Other enhancements - ``pd.concat`` will now use existing Series names if provided (:issue:`10698`). - .. ipython:: python + .. code-block:: python foo = pd.Series([1, 2], name="foo") bar = pd.Series([1, 2]) @@ -431,7 +427,7 @@ Other enhancements New behavior: - .. ipython:: python + .. code-block:: python pd.concat([foo, bar, baz], 1) @@ -439,14 +435,14 @@ Other enhancements - Add a ``limit_direction`` keyword argument that works with ``limit`` to enable ``interpolate`` to fill ``NaN`` values forward, backward, or both (:issue:`9218`, :issue:`10420`, :issue:`11115`) - .. ipython:: python + .. code-block:: python ser = pd.Series([np.nan, np.nan, 5, np.nan, np.nan, np.nan, 13]) ser.interpolate(limit=1, limit_direction="both") - Added a ``DataFrame.round`` method to round the values to a variable number of decimal places (:issue:`10568`). - .. ipython:: python + .. code-block:: python df = pd.DataFrame( np.random.random([3, 3]), @@ -459,7 +455,7 @@ Other enhancements - ``drop_duplicates`` and ``duplicated`` now accept a ``keep`` keyword to target first, last, and all duplicates. The ``take_last`` keyword is deprecated, see :ref:`here <whatsnew_0170.deprecations>` (:issue:`6511`, :issue:`8505`) - .. ipython:: python + .. code-block:: python s = pd.Series(["A", "B", "C", "A", "B", "D"]) s.drop_duplicates() @@ -468,14 +464,14 @@ Other enhancements - Reindex now has a ``tolerance`` argument that allows for finer control of :ref:`basics.limits_on_reindex_fill` (:issue:`10411`): - .. ipython:: python + .. code-block:: python df = pd.DataFrame({"x": range(5), "t": pd.date_range("2000-01-01", periods=5)}) df.reindex([0.1, 1.9, 3.5], method="nearest", tolerance=0.2) When used on a ``DatetimeIndex``, ``TimedeltaIndex`` or ``PeriodIndex``, ``tolerance`` will coerced into a ``Timedelta`` if possible. 
This allows you to specify tolerance with a string: - .. ipython:: python + .. code-block:: python df = df.set_index("t") df.reindex(pd.to_datetime(["1999-12-31"]), method="nearest", tolerance="1 day") @@ -630,13 +626,13 @@ New behavior: Of course you can coerce this as well. -.. ipython:: python +.. code-block:: python pd.to_datetime(["2009-07-31", "asd"], errors="coerce") To keep the previous behavior, you can use ``errors='ignore'``: -.. ipython:: python +.. code-block:: python pd.to_datetime(["2009-07-31", "asd"], errors="ignore") @@ -670,7 +666,7 @@ v0.17.0 can parse them as below. It works on ``DatetimeIndex`` also. New behavior: -.. ipython:: python +.. code-block:: python pd.Timestamp("2012Q2") pd.Timestamp("2014") @@ -680,7 +676,7 @@ New behavior: If you want to perform calculations based on today's date, use ``Timestamp.now()`` and ``pandas.tseries.offsets``. - .. ipython:: python + .. code-block:: python import pandas.tseries.offsets as offsets @@ -724,14 +720,13 @@ New behavior: Note that this is different from the ``numpy`` behavior where a comparison can be broadcast: -.. ipython:: python +.. code-block:: python np.array([1, 2, 3]) == np.array([1]) or it can return False if broadcasting can not be done: -.. ipython:: python - :okwarning: +.. code-block:: python np.array([1, 2, 3]) == np.array([1, 2]) @@ -740,7 +735,7 @@ Changes to boolean comparisons vs. None Boolean comparisons of a ``Series`` vs ``None`` will now be equivalent to comparing with ``np.nan``, rather than raise ``TypeError``. (:issue:`1079`). -.. ipython:: python +.. code-block:: python s = pd.Series(range(3)) s.iloc[1] = None @@ -755,13 +750,13 @@ Previous behavior: New behavior: -.. ipython:: python +.. code-block:: python s == None Usually you simply want to know which values are null. -.. ipython:: python +.. code-block:: python s.isnull() @@ -770,7 +765,7 @@ Usually you simply want to know which values are null. 
You generally will want to use ``isnull/notnull`` for these types of comparisons, as ``isnull/notnull`` tells you which elements are null. One has to be mindful that ``nan's`` don't compare equal, but ``None's`` do. Note that pandas/numpy uses the fact that ``np.nan != np.nan``, and treats ``None`` like ``np.nan``. - .. ipython:: python + .. code-block:: python None == None np.nan == np.nan @@ -784,7 +779,7 @@ The default behavior for HDFStore write functions with ``format='table'`` is now Previous behavior: -.. ipython:: python +.. code-block:: python df_with_missing = pd.DataFrame( {"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]} @@ -811,14 +806,13 @@ Previous behavior: New behavior: -.. ipython:: python +.. code-block:: python df_with_missing.to_hdf("file.h5", "df_with_missing", format="table", mode="w") pd.read_hdf("file.h5", "df_with_missing") -.. ipython:: python - :suppress: +.. code-block:: python import os @@ -851,7 +845,7 @@ did not work for values with standard formatting. It was also out of step with h Going forward the value of ``display.precision`` will directly control the number of places after the decimal, for regular formatting as well as scientific notation, similar to how numpy's ``precision`` print option works. -.. ipython:: python +.. code-block:: python pd.set_option("display.precision", 2) pd.DataFrame({"x": [123.456789]}) @@ -859,8 +853,7 @@ regular formatting as well as scientific notation, similar to how numpy's ``prec To preserve output behavior with prior versions the default value of ``display.precision`` has been reduced to ``6`` from ``7``. -.. ipython:: python - :suppress: +.. code-block:: python pd.set_option("display.precision", 6) @@ -874,7 +867,7 @@ Changes to ``Categorical.unique`` - unordered category: values and categories are sorted by appearance order. - ordered category: values are sorted by appearance order, categories keep existing order. -.. ipython:: python +.. 
code-block:: python cat = pd.Categorical(["C", "A", "B", "C"], categories=["A", "B", "C"], ordered=True) cat @@ -979,7 +972,7 @@ Removal of prior version deprecations/changes - Removal of ``colSpace`` parameter from ``DataFrame.to_string()``, in favor of ``col_space``, circa 0.8.0 version. - Removal of automatic time-series broadcasting (:issue:`2304`) - .. ipython:: python + .. code-block:: python np.random.seed(1234) df = pd.DataFrame( @@ -1007,7 +1000,7 @@ Removal of prior version deprecations/changes Current - .. ipython:: python + .. code-block:: python df.add(df.A, axis="index") diff --git a/doc/source/whatsnew/v0.17.1.rst b/doc/source/whatsnew/v0.17.1.rst index 6b0a28ec47568..2db4a9c201e95 100644 --- a/doc/source/whatsnew/v0.17.1.rst +++ b/doc/source/whatsnew/v0.17.1.rst @@ -49,7 +49,7 @@ an instance of :class:`~pandas.core.style.Styler` with your data attached. Here's a quick example: - .. ipython:: python + .. code-block:: python np.random.seed(123) df = pd.DataFrame(np.random.randn(10, 5), columns=list("abcde")) @@ -78,7 +78,7 @@ Enhancements - Added ``axvlines_kwds`` to parallel coordinates plot (:issue:`10709`) - Option to ``.info()`` and ``.memory_usage()`` to provide for deep introspection of memory consumption. Note that this can be expensive to compute and therefore is an optional parameter. (:issue:`11595`) - .. ipython:: python + .. code-block:: python df = pd.DataFrame({"A": ["foo"] * 1000}) # noqa: F821 df["B"] = df["A"].astype("category") @@ -91,13 +91,13 @@ Enhancements - ``Index`` now has a ``fillna`` method (:issue:`10089`) - .. ipython:: python + .. code-block:: python pd.Index([1, np.nan, 3]).fillna(2) - Series of type ``category`` now make ``.str.<...>`` and ``.dt.<...>`` accessor methods / properties available, if the categories are of that type. (:issue:`10661`) - .. ipython:: python + .. 
code-block:: python s = pd.Series(list("aabb")).astype("category") s diff --git a/doc/source/whatsnew/v0.18.0.rst b/doc/source/whatsnew/v0.18.0.rst index 829c04dac9f2d..07a078eee18e5 100644 --- a/doc/source/whatsnew/v0.18.0.rst +++ b/doc/source/whatsnew/v0.18.0.rst @@ -56,7 +56,7 @@ Window functions are now methods Window functions have been refactored to be methods on ``Series/DataFrame`` objects, rather than top-level functions, which are now deprecated. This allows these window-type functions, to have a similar API to that of ``.groupby``. See the full documentation :ref:`here <window.overview>` (:issue:`11603`, :issue:`12373`) -.. ipython:: python +.. code-block:: python np.random.seed(1234) df = pd.DataFrame({'A': range(10), 'B': np.random.randn(10)}) @@ -84,15 +84,16 @@ Previous behavior: New behavior: -.. ipython:: python +.. code-block:: python r = df.rolling(window=3) These show a descriptive repr -.. ipython:: python +.. code-block:: python r + with tab-completion of available methods and properties. .. code-block:: ipython @@ -103,19 +104,19 @@ with tab-completion of available methods and properties. The methods operate on the ``Rolling`` object itself -.. ipython:: python +.. code-block:: python r.mean() They provide getitem accessors -.. ipython:: python +.. code-block:: python r['A'].mean() And multiple aggregations -.. ipython:: python +.. code-block:: python r.agg({'A': ['mean', 'std'], 'B': ['mean', 'std']}) @@ -128,12 +129,12 @@ Changes to rename ``Series.rename`` and ``NDFrame.rename_axis`` can now take a scalar or list-like argument for altering the Series or axis *name*, in addition to their old behaviors of altering labels. (:issue:`9494`, :issue:`11965`) -.. ipython:: python +.. code-block:: python s = pd.Series(np.random.randn(5)) s.rename('newname') -.. ipython:: python +.. code-block:: python df = pd.DataFrame(np.random.randn(5, 2)) (df.rename_axis("indexname") @@ -170,7 +171,7 @@ Previous behavior: New behavior: -.. ipython:: python +.. 
code-block:: python s = pd.Series(range(1000)) s.index @@ -209,20 +210,20 @@ Currently the default is ``expand=None`` which gives a ``FutureWarning`` and use Extracting a regular expression with one group returns a Series if ``expand=False``. -.. ipython:: python +.. code-block:: python pd.Series(['a1', 'b2', 'c3']).str.extract(r'[ab](\d)', expand=False) It returns a ``DataFrame`` with one column if ``expand=True``. -.. ipython:: python +.. code-block:: python pd.Series(['a1', 'b2', 'c3']).str.extract(r'[ab](\d)', expand=True) Calling on an ``Index`` with a regex with exactly one capture group returns an ``Index`` if ``expand=False``. -.. ipython:: python +.. code-block:: python s = pd.Series(["a1", "b2", "c3"], ["A11", "B22", "C33"]) s.index @@ -230,7 +231,7 @@ returns an ``Index`` if ``expand=False``. It returns a ``DataFrame`` with one column if ``expand=True``. -.. ipython:: python +.. code-block:: python s.index.str.extract("(?P<letter>[a-zA-Z])", expand=True) @@ -244,7 +245,7 @@ raises ``ValueError`` if ``expand=False``. It returns a ``DataFrame`` if ``expand=True``. -.. ipython:: python +.. code-block:: python s.index.str.extract("(?P<letter>[a-zA-Z])([0-9]+)", expand=True) @@ -261,7 +262,7 @@ The :ref:`.str.extractall <text.extractall>` method was added (:issue:`11386`). Unlike ``extract``, which returns only the first match. -.. ipython:: python +.. code-block:: python s = pd.Series(["a1a2", "b1", "c1"], ["A", "B", "C"]) s @@ -269,7 +270,7 @@ match. The ``extractall`` method returns all matches. -.. ipython:: python +.. code-block:: python s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)") @@ -282,7 +283,7 @@ The method ``.str.cat()`` concatenates the members of a ``Series``. Before, if ` A new, friendlier ``ValueError`` is added to protect against the mistake of supplying the ``sep`` as an arg, rather than as a kwarg. (:issue:`11334`). -.. ipython:: python +.. 
code-block:: python pd.Series(['a', 'b', np.nan, 'c']).str.cat(sep=' ') pd.Series(['a', 'b', np.nan, 'c']).str.cat(sep=' ', na_rep='?') @@ -302,7 +303,7 @@ Datetimelike rounding Naive datetimes -.. ipython:: python +.. code-block:: python dr = pd.date_range('20130101 09:12:56.1234', periods=3) dr @@ -314,7 +315,7 @@ Naive datetimes Tz-aware are rounded, floored and ceiled in local times -.. ipython:: python +.. code-block:: python dr = dr.tz_localize('US/Eastern') dr @@ -322,7 +323,7 @@ Tz-aware are rounded, floored and ceiled in local times Timedeltas -.. ipython:: python +.. code-block:: python t = pd.timedelta_range('1 days 2 hr 13 min 45 us', periods=3, freq='d') t @@ -335,7 +336,7 @@ Timedeltas In addition, ``.round()``, ``.floor()`` and ``.ceil()`` will be available through the ``.dt`` accessor of ``Series``. -.. ipython:: python +.. code-block:: python s = pd.Series(dr) s @@ -371,7 +372,7 @@ Previous behavior: New behavior: -.. ipython:: python +.. code-block:: python s = pd.Series([1, 2, 3], index=np.arange(3.)) s @@ -408,7 +409,7 @@ Previous behavior: New behavior: -.. ipython:: python +.. code-block:: python df = pd.DataFrame({'a': [0, 1, 1], 'b': pd.Series([100, 200, 300], dtype='uint32')}) @@ -445,7 +446,7 @@ Previous behavior: New behavior: -.. ipython:: python +.. code-block:: python df = pd.DataFrame(np.array(range(1,10)).reshape(3,3), columns=list('abc'), @@ -550,7 +551,7 @@ are now also defined for ``NaT`` (:issue:`11564`). ``NaT`` now supports arithmetic operations with integers and floats. -.. ipython:: python +.. code-block:: python pd.NaT * 1 pd.NaT * 1.5 @@ -559,7 +560,7 @@ are now also defined for ``NaT`` (:issue:`11564`). ``NaT`` defines more arithmetic operations with ``datetime64[ns]`` and ``timedelta64[ns]``. -.. ipython:: python +.. code-block:: python pd.NaT / pd.NaT pd.Timedelta('1s') / pd.NaT @@ -568,7 +569,7 @@ are now also defined for ``NaT`` (:issue:`11564`). 
Given the ambiguity, it is treated as a ``timedelta64[ns]``, which allows more operations to succeed. -.. ipython:: python +.. code-block:: python pd.NaT + pd.NaT @@ -591,19 +592,19 @@ the ``dtype`` information is respected. TypeError: can only operate on a datetimes for subtraction, but the operator [__add__] was passed -.. ipython:: python +.. code-block:: python pd.Series([pd.NaT], dtype='<m8[ns]') + pd.Series([pd.NaT], dtype='<m8[ns]') ``Timedelta`` division by ``floats`` now works. -.. ipython:: python +.. code-block:: python pd.Timedelta('1s') / 2.0 Subtraction by ``Timedelta`` in a ``Series`` by a ``Timestamp`` works (:issue:`11925`) -.. ipython:: python +.. code-block:: python ser = pd.Series(pd.timedelta_range('1 day', periods=3)) ser @@ -667,7 +668,7 @@ Previous signature New signature -.. ipython:: python +.. code-block:: python pd.Series([0,1]).rank(axis=0, method='average', numeric_only=None, na_option='keep', ascending=True, pct=False) @@ -685,7 +686,7 @@ The general semantics of anchored offsets for ``n=0`` is to not move the date when it is an anchor point (e.g., a quarter start date), and otherwise roll forward to the next anchor point. -.. ipython:: python +.. code-block:: python d = pd.Timestamp('2014-02-01') d @@ -705,7 +706,7 @@ For the ``QuarterBegin`` offset in previous versions, the date would be rolled This behavior has been corrected in version 0.18.0, which is consistent with other anchored offsets like ``MonthBegin`` and ``YearBegin``. -.. ipython:: python +.. code-block:: python d = pd.Timestamp('2014-02-15') d + pd.offsets.QuarterBegin(n=0, startingMonth=2) @@ -717,7 +718,7 @@ Resample API Like the change in the window functions API :ref:`above <whatsnew_0180.enhancements.moments>`, ``.resample(...)`` is changing to have a more groupby-like API. (:issue:`11732`, :issue:`12702`, :issue:`12202`, :issue:`12332`, :issue:`12334`, :issue:`12348`, :issue:`12448`). -.. ipython:: python +.. 
code-block:: python np.random.seed(1234) df = pd.DataFrame(np.random.rand(10,4), @@ -761,8 +762,7 @@ You could also specify a ``how`` directly Now, you can write ``.resample(..)`` as a 2-stage operation like ``.groupby(...)``, which yields a ``Resampler``. -.. ipython:: python - :okwarning: +.. code-block:: python r = df.resample('2s') r @@ -773,29 +773,29 @@ Downsampling You can then use this object to perform operations. These are downsampling operations (going from a higher frequency to a lower one). -.. ipython:: python +.. code-block:: python r.mean() -.. ipython:: python +.. code-block:: python r.sum() Furthermore, resample now supports ``getitem`` operations to perform the resample on specific columns. -.. ipython:: python +.. code-block:: python r[['A','C']].mean() and ``.aggregate`` type operations. -.. ipython:: python +.. code-block:: python r.agg({'A' : 'mean', 'B' : 'sum'}) These accessors can of course, be combined -.. ipython:: python +.. code-block:: python r[['A','B']].agg(['mean','sum']) @@ -808,7 +808,7 @@ Upsampling operations take you from a lower frequency to a higher frequency. The performed with the ``Resampler`` objects with :meth:`~Resampler.backfill`, :meth:`~Resampler.ffill`, :meth:`~Resampler.fillna` and :meth:`~Resampler.asfreq` methods. -.. ipython:: python +.. code-block:: python s = pd.Series(np.arange(5, dtype='int64'), index=pd.date_range('2010-01-01', periods=5, freq='Q')) @@ -837,7 +837,7 @@ Previously New API -.. ipython:: python +.. code-block:: python s.resample('M').ffill() @@ -891,7 +891,7 @@ Previous API will work but with deprecations The new API will: - .. ipython:: python + .. code-block:: python df.resample('2s').min() @@ -900,7 +900,7 @@ Previous API will work but with deprecations To replicate the original operation - .. ipython:: python + .. 
code-block:: python df.resample('2s').mean().min() @@ -910,13 +910,12 @@ Changes to eval In prior versions, new columns assignments in an ``eval`` expression resulted in an inplace change to the ``DataFrame``. (:issue:`9297`, :issue:`8664`, :issue:`10486`) -.. ipython:: python +.. code-block:: python df = pd.DataFrame({'a': np.linspace(0, 10, 5), 'b': range(5)}) df -.. ipython:: python - :suppress: +.. code-block:: python df.eval('c = a + b', inplace=True) @@ -938,7 +937,7 @@ in an inplace change to the ``DataFrame``. (:issue:`9297`, :issue:`8664`, :issue In version 0.18.0, a new ``inplace`` keyword was added to choose whether the assignment should be done inplace or return a copy. -.. ipython:: python +.. code-block:: python df df.eval('d = c - b', inplace=False) @@ -954,7 +953,7 @@ assignment should be done inplace or return a copy. The ``inplace`` keyword parameter was also added the ``query`` method. -.. ipython:: python +.. code-block:: python df.query('a > 5') df.query('a > 5', inplace=True) @@ -969,7 +968,7 @@ The ``inplace`` keyword parameter was also added the ``query`` method. assignments. These expressions will be evaluated one at a time in order. Only assignments are valid for multi-line expressions. -.. ipython:: python +.. code-block:: python df df.eval(""" @@ -985,7 +984,7 @@ Other API changes ^^^^^^^^^^^^^^^^^ - ``DataFrame.between_time`` and ``Series.between_time`` now only parse a fixed set of time strings. Parsing of date strings is no longer supported and raises a ``ValueError``. (:issue:`11818`) - .. ipython:: python + .. code-block:: python s = pd.Series(range(10), pd.date_range('2015-01-01', freq='H', periods=10)) s.between_time("7:00am", "9:00am") @@ -1067,7 +1066,7 @@ Removal of deprecated float indexers In :issue:`4892` indexing with floating point numbers on a non-``Float64Index`` was deprecated (in version 0.14.0). In 0.18.0, this deprecation warning is removed and these will now raise a ``TypeError``. 
(:issue:`12165`, :issue:`12333`) -.. ipython:: python +.. code-block:: python s = pd.Series([1, 2, 3], index=[4, 5, 6]) s @@ -1115,14 +1114,14 @@ For iloc, getting & setting via a float scalar will always raise. Other indexers will coerce to a like integer for both getting and setting. The ``FutureWarning`` has been dropped for ``.loc``, ``.ix`` and ``[]``. -.. ipython:: python +.. code-block:: python s[5.0] s.loc[5.0] and setting -.. ipython:: python +.. code-block:: python s_copy = s.copy() s_copy[5.0] = 10 @@ -1146,19 +1145,19 @@ Positional setting with ``.ix`` and a float indexer will ADD this value to the i Slicing will also coerce integer-like floats to integers for a non-``Float64Index``. -.. ipython:: python +.. code-block:: python s.loc[5.0:6] Note that for floats that are NOT coercible to ints, the label based bounds will be excluded -.. ipython:: python +.. code-block:: python s.loc[5.1:6] Float indexing on a ``Float64Index`` is unchanged. -.. ipython:: python +.. code-block:: python s = pd.Series([1, 2, 3], index=np.arange(3.)) s[1.0] diff --git a/doc/source/whatsnew/v0.18.1.rst b/doc/source/whatsnew/v0.18.1.rst index 3db00f686d62c..0d7783d24ae80 100644 --- a/doc/source/whatsnew/v0.18.1.rst +++ b/doc/source/whatsnew/v0.18.1.rst @@ -38,7 +38,7 @@ The ``CustomBusinessHour`` is a mixture of ``BusinessHour`` and ``CustomBusiness allows you to specify arbitrary holidays. For details, see :ref:`Custom Business Hour <timeseries.custombusinesshour>` (:issue:`11514`) -.. ipython:: python +.. code-block:: python from pandas.tseries.offsets import CustomBusinessHour from pandas.tseries.holiday import USFederalHolidayCalendar @@ -47,7 +47,7 @@ see :ref:`Custom Business Hour <timeseries.custombusinesshour>` (:issue:`11514`) Friday before MLK Day -.. ipython:: python +.. code-block:: python import datetime @@ -57,7 +57,7 @@ Friday before MLK Day Tuesday after MLK Day (Monday is skipped because it's a holiday) -.. ipython:: python +.. 
code-block:: python dt + bhour_us * 2 @@ -72,24 +72,24 @@ You can now use ``.rolling(..)`` and ``.expanding(..)`` as methods on groupbys. Previously you would have to do this to get a rolling window mean per-group: -.. ipython:: python +.. code-block:: python df = pd.DataFrame({"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)}) df -.. ipython:: python +.. code-block:: python df.groupby("A").apply(lambda x: x.rolling(4).B.mean()) Now you can do: -.. ipython:: python +.. code-block:: python df.groupby("A").rolling(4).B.mean() For ``.resample(..)`` type of operations, previously you would have to: -.. ipython:: python +.. code-block:: python df = pd.DataFrame( { @@ -101,13 +101,13 @@ For ``.resample(..)`` type of operations, previously you would have to: df -.. ipython:: python +.. code-block:: python df.groupby("group").apply(lambda x: x.resample("1D").ffill()) Now you can do: -.. ipython:: python +.. code-block:: python df.groupby("group").resample("1D").ffill() @@ -130,7 +130,7 @@ Methods ``.where()`` and ``.mask()`` These can accept a callable for the condition and ``other`` arguments. -.. ipython:: python +.. code-block:: python df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) df.where(lambda x: x > 4, lambda x: x + 10) @@ -141,7 +141,7 @@ Methods ``.loc[]``, ``.iloc[]``, ``.ix[]`` These can accept a callable, and a tuple of callable as a slicer. The callable can return a valid boolean indexer or anything which is valid for these indexer's input. -.. ipython:: python +.. code-block:: python # callable returns bool indexer df.loc[lambda x: x.A >= 2, lambda x: x.sum() > 10] @@ -156,14 +156,14 @@ Finally, you can use a callable in ``[]`` indexing of Series, DataFrame and Pane The callable must return a valid input for ``[]`` indexing depending on its class and index type. -.. ipython:: python +.. code-block:: python df[lambda x: "A"] Using these methods / indexers, you can chain data selection operations without using temporary variable. -.. 
ipython:: python +.. code-block:: python bb = pd.read_csv("data/baseball.csv", index_col="id") (bb.groupby(["year", "team"]).sum().loc[lambda df: df.r > 100]) @@ -175,7 +175,7 @@ Partial string indexing on ``DatetimeIndex`` when part of a ``MultiIndex`` Partial string indexing now matches on ``DateTimeIndex`` when part of a ``MultiIndex`` (:issue:`10331`) -.. ipython:: python +.. code-block:: python dft2 = pd.DataFrame( np.random.randn(20, 1), @@ -189,7 +189,7 @@ Partial string indexing now matches on ``DateTimeIndex`` when part of a ``MultiI On other levels -.. ipython:: python +.. code-block:: python idx = pd.IndexSlice dft2 = dft2.swaplevel(0, 1).sort_index() @@ -203,7 +203,7 @@ Assembling datetimes ``pd.to_datetime()`` has gained the ability to assemble datetimes from a passed in ``DataFrame`` or a dict. (:issue:`8158`). -.. ipython:: python +.. code-block:: python df = pd.DataFrame( {"year": [2015, 2016], "month": [2, 3], "day": [4, 5], "hour": [2, 3]} @@ -212,13 +212,13 @@ Assembling datetimes Assembling using the passed frame. -.. ipython:: python +.. code-block:: python pd.to_datetime(df) You can pass only the columns that you need to assemble. -.. ipython:: python +.. code-block:: python pd.to_datetime(df[["year", "month", "day"]]) @@ -239,7 +239,7 @@ Other enhancements - ``Index.take`` now handles ``allow_fill`` and ``fill_value`` consistently (:issue:`12631`) - .. ipython:: python + .. code-block:: python idx = pd.Index([1.0, 2.0, 3.0, 4.0], dtype="float") @@ -249,7 +249,7 @@ Other enhancements - ``Index`` now supports ``.str.get_dummies()`` which returns ``MultiIndex``, see :ref:`Creating Indicator Variables <text.indicator>` (:issue:`10008`, :issue:`10103`) - .. ipython:: python + .. code-block:: python idx = pd.Index(["a|b", "a|c", "b|c"]) idx.str.get_dummies("|") @@ -309,7 +309,7 @@ Method ``.groupby(..).nth()`` changes The index in ``.groupby(..).nth()`` output is now more consistent when the ``as_index`` argument is passed (:issue:`11039`): -.. 
ipython:: python +.. code-block:: python df = pd.DataFrame({"A": ["a", "b", "a"], "B": [1, 2, 3]}) df @@ -332,14 +332,14 @@ Previous behavior: New behavior: -.. ipython:: python +.. code-block:: python df.groupby("A", as_index=True)["B"].nth(0) df.groupby("A", as_index=False)["B"].nth(0) Furthermore, previously, a ``.groupby`` would always sort, regardless if ``sort=False`` was passed with ``.nth()``. -.. ipython:: python +.. code-block:: python np.random.seed(1234) df = pd.DataFrame(np.random.randn(100, 2), columns=["a", "b"]) @@ -369,7 +369,7 @@ Previous behavior: New behavior: -.. ipython:: python +.. code-block:: python df.groupby("c", sort=True).nth(1) df.groupby("c", sort=False).nth(1) @@ -416,7 +416,7 @@ Using ``.apply`` on GroupBy resampling Using ``apply`` on resampling groupby operations (using a ``pd.TimeGrouper``) now has the same output types as similar ``apply`` calls on other groupby operations. (:issue:`11742`). -.. ipython:: python +.. code-block:: python df = pd.DataFrame( {"date": pd.to_datetime(["10/10/2000", "11/10/2000"]), "value": [10, 13]} diff --git a/doc/source/whatsnew/v0.19.0.rst b/doc/source/whatsnew/v0.19.0.rst index 340e1ce9ee1ef..29abc32f16bae 100644 --- a/doc/source/whatsnew/v0.19.0.rst +++ b/doc/source/whatsnew/v0.19.0.rst @@ -47,7 +47,7 @@ support asof style joining of time-series (:issue:`1870`, :issue:`13695`, :issue The :func:`merge_asof` performs an asof merge, which is similar to a left-join except that we match on nearest key rather than equal keys. -.. ipython:: python +.. code-block:: python left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]}) @@ -58,13 +58,13 @@ except that we match on nearest key rather than equal keys. We typically want to match exactly when possible, and use the most recent value otherwise. -.. ipython:: python +.. 
code-block:: python pd.merge_asof(left, right, on="a") We can also match rows ONLY with prior data, and not an exact match. -.. ipython:: python +.. code-block:: python pd.merge_asof(left, right, on="a", allow_exact_matches=False) @@ -72,7 +72,7 @@ We can also match rows ONLY with prior data, and not an exact match. In a typical time-series example, we have ``trades`` and ``quotes`` and we want to ``asof-join`` them. This also illustrates using the ``by`` parameter to group data before merging. -.. ipython:: python +.. code-block:: python trades = pd.DataFrame( { @@ -113,7 +113,7 @@ This also illustrates using the ``by`` parameter to group data before merging. columns=["time", "ticker", "bid", "ask"], ) -.. ipython:: python +.. code-block:: python trades quotes @@ -122,7 +122,7 @@ An asof merge joins on the ``on``, typically a datetimelike field, which is orde in this case we are using a grouper in the ``by`` field. This is like a left-outer join, except that forward filling happens automatically taking the most recent non-NaN value. -.. ipython:: python +.. code-block:: python pd.merge_asof(trades, quotes, on="time", by="ticker") @@ -137,7 +137,7 @@ Method ``.rolling()`` is now time-series aware ``.rolling()`` objects are now time-series aware and can accept a time-series offset (or convertible) for the ``window`` argument (:issue:`13327`, :issue:`12995`). See the full documentation :ref:`here <window.generic>`. -.. ipython:: python +.. code-block:: python dft = pd.DataFrame( {"B": [0, 1, 2, np.nan, 4]}, @@ -147,20 +147,20 @@ See the full documentation :ref:`here <window.generic>`. This is a regular frequency index. Using an integer window parameter works to roll along the window frequency. -.. ipython:: python +.. code-block:: python dft.rolling(2).sum() dft.rolling(2, min_periods=1).sum() Specifying an offset allows a more intuitive specification of the rolling frequency. -.. ipython:: python +.. 
code-block:: python dft.rolling("2s").sum() Using a non-regular, but still monotonic index, rolling with an integer window does not impart any special calculation. -.. ipython:: python +.. code-block:: python dft = pd.DataFrame( @@ -182,14 +182,14 @@ Using a non-regular, but still monotonic index, rolling with an integer window d Using the time-specification generates variable windows for this sparse data. -.. ipython:: python +.. code-block:: python dft.rolling("2s").sum() Furthermore, we now allow an optional ``on`` parameter to specify a column (rather than the default of the index) in a DataFrame. -.. ipython:: python +.. code-block:: python dft = dft.reset_index() dft @@ -200,15 +200,14 @@ default of the index) in a DataFrame. Method ``read_csv`` has improved support for duplicate column names ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. ipython:: python - :suppress: +.. code-block:: python from io import StringIO :ref:`Duplicate column names <io.dupe_names>` are now supported in :func:`read_csv` whether they are in the file or passed in as the ``names`` parameter (:issue:`7160`, :issue:`9424`) -.. ipython:: python +.. code-block:: python data = "0,1,2\n3,4,5" names = ["a", "b", "a"] @@ -228,8 +227,7 @@ contained the values ``[0, 3]``. **New behavior**: -.. ipython:: python - :okexcept: +.. code-block:: python pd.read_csv(StringIO(data), names=names) @@ -244,7 +242,7 @@ specified as a dtype (:issue:`10153`). Depending on the structure of the data, this can result in a faster parse time and lower memory usage compared to converting to ``Categorical`` after parsing. See the io :ref:`docs here <io.categorical>`. -.. ipython:: python +.. code-block:: python data = """ col1,col2,col3 @@ -259,7 +257,7 @@ converting to ``Categorical`` after parsing. See the io :ref:`docs here <io.cat Individual columns can be parsed as a ``Categorical`` using a dict specification -.. ipython:: python +.. 
code-block:: python pd.read_csv(StringIO(data), dtype={"col1": "category"}).dtypes @@ -270,7 +268,7 @@ Individual columns can be parsed as a ``Categorical`` using a dict specification :func:`to_numeric` function, or as appropriate, another converter such as :func:`to_datetime`. - .. ipython:: python + .. code-block:: python df = pd.read_csv(StringIO(data), dtype="category") df.dtypes @@ -285,7 +283,7 @@ Categorical concatenation - A function :func:`union_categoricals` has been added for combining categoricals, see :ref:`Unioning Categoricals<categorical.union>` (:issue:`13361`, :issue:`13763`, :issue:`13846`, :issue:`14173`) - .. ipython:: python + .. code-block:: python from pandas.api.types import union_categoricals @@ -295,7 +293,7 @@ Categorical concatenation - ``concat`` and ``append`` now can concat ``category`` dtypes with different ``categories`` as ``object`` dtype (:issue:`13524`) - .. ipython:: python + .. code-block:: python s1 = pd.Series(["a", "b"], dtype="category") s2 = pd.Series(["b", "c"], dtype="category") @@ -309,7 +307,7 @@ Categorical concatenation **New behavior**: -.. ipython:: python +.. code-block:: python pd.concat([s1, s2]) @@ -322,13 +320,13 @@ pandas has gained new frequency offsets, ``SemiMonthEnd`` ('SM') and ``SemiMonth These provide date offsets anchored (by default) to the 15th and end of month, and 15th and 1st of month respectively. (:issue:`1543`) -.. ipython:: python +.. code-block:: python from pandas.tseries.offsets import SemiMonthEnd, SemiMonthBegin **SemiMonthEnd**: -.. ipython:: python +.. code-block:: python pd.Timestamp("2016-01-01") + SemiMonthEnd() @@ -336,7 +334,7 @@ These provide date offsets anchored (by default) to the 15th and end of month, a **SemiMonthBegin**: -.. ipython:: python +.. 
code-block:: python pd.Timestamp("2016-01-01") + SemiMonthBegin() @@ -344,7 +342,7 @@ These provide date offsets anchored (by default) to the 15th and end of month, a Using the anchoring suffix, you can also specify the day of month to use instead of the 15th. -.. ipython:: python +.. code-block:: python pd.date_range("2015-01-01", freq="SMS-16", periods=4) @@ -359,7 +357,7 @@ The following methods and options are added to ``Index``, to be more consistent ``Index`` now supports the ``.where()`` function for same shape indexing (:issue:`13170`) -.. ipython:: python +.. code-block:: python idx = pd.Index(["a", "b", "c"]) idx.where([True, False, True]) @@ -367,7 +365,7 @@ The following methods and options are added to ``Index``, to be more consistent ``Index`` now supports ``.dropna()`` to exclude missing values (:issue:`6194`) -.. ipython:: python +.. code-block:: python idx = pd.Index([1, 2, np.nan, 4]) idx.dropna() @@ -375,7 +373,7 @@ The following methods and options are added to ``Index``, to be more consistent For ``MultiIndex``, values are dropped if any level is missing by default. Specifying ``how='all'`` only drops values where all levels are missing. -.. ipython:: python +.. code-block:: python midx = pd.MultiIndex.from_arrays([[1, 2, np.nan, 4], [1, 2, np.nan, np.nan]]) midx @@ -384,7 +382,7 @@ For ``MultiIndex``, values are dropped if any level is missing by default. Speci ``Index`` now supports ``.str.extractall()`` which returns a ``DataFrame``, see the :ref:`docs here <text.extractall>` (:issue:`10008`, :issue:`13156`) -.. ipython:: python +.. code-block:: python idx = pd.Index(["a1a2", "b1", "c1"]) idx.str.extractall(r"[ab](?P<digit>\d)") @@ -429,7 +427,7 @@ The ``pd.get_dummies`` function now returns dummy-encoded columns as small integ **New behavior**: -.. ipython:: python +.. 
code-block:: python pd.get_dummies(["a", "b", "a", "c"]).dtypes @@ -441,7 +439,7 @@ Downcast values to smallest possible dtype in ``to_numeric`` ``pd.to_numeric()`` now accepts a ``downcast`` parameter, which will downcast the data if possible to smallest specified numerical dtype (:issue:`13352`) -.. ipython:: python +.. code-block:: python s = ["1", 2, 3] pd.to_numeric(s, downcast="unsigned") @@ -459,7 +457,7 @@ will be published in future versions of pandas (:issue:`13147`, :issue:`13634`) The following are now part of this API: -.. ipython:: python +.. code-block:: python import pprint from pandas.api import types @@ -479,7 +477,7 @@ Other enhancements - ``Timestamp`` can now accept positional and keyword parameters similar to :func:`datetime.datetime` (:issue:`10758`, :issue:`11630`) - .. ipython:: python + .. code-block:: python pd.Timestamp(2012, 1, 1) @@ -487,7 +485,7 @@ Other enhancements - The ``.resample()`` function now accepts a ``on=`` or ``level=`` parameter for resampling on a datetimelike column or ``MultiIndex`` level (:issue:`13500`) - .. ipython:: python + .. code-block:: python df = pd.DataFrame( {"date": pd.date_range("2015-01-01", freq="W", periods=5), "a": np.arange(5)}, @@ -522,7 +520,7 @@ Other enhancements - ``DataFrame`` has gained support to re-order the columns based on the values in a row using ``df.sort_values(by='...', axis=1)`` (:issue:`10806`) - .. ipython:: python + .. code-block:: python df = pd.DataFrame({"A": [2, 7], "B": [3, 5], "C": [4, 8]}, index=["row1", "row2"]) df @@ -556,7 +554,7 @@ API changes ``Series.tolist()`` will now return Python types in the output, mimicking NumPy ``.tolist()`` behavior (:issue:`10904`) -.. ipython:: python +.. code-block:: python s = pd.Series([1, 2, 3]) @@ -570,7 +568,7 @@ API changes **New behavior**: -.. ipython:: python +.. code-block:: python type(s.tolist()[0]) @@ -597,7 +595,7 @@ Arithmetic operators Arithmetic operators align both ``index`` (no changes). -.. ipython:: python +.. 
code-block:: python s1 = pd.Series([1, 2, 3], index=list("ABC")) s2 = pd.Series([2, 2, 2], index=list("ABD")) @@ -637,13 +635,13 @@ Comparison operators raise ``ValueError`` when ``.index`` are different. To achieve the same result as previous versions (compare values based on locations ignoring ``.index``), compare both ``.values``. - .. ipython:: python + .. code-block:: python s1.values == s2.values If you want to compare ``Series`` aligning its ``.index``, see flexible comparison methods section below: - .. ipython:: python + .. code-block:: python s1.eq(s2) @@ -675,7 +673,7 @@ Logical operators align both ``.index`` of left and right hand side. **New behavior** (``Series``): -.. ipython:: python +.. code-block:: python s1 = pd.Series([True, False, True], index=list("ABC")) s2 = pd.Series([True, True, True], index=list("ABD")) @@ -687,13 +685,13 @@ Logical operators align both ``.index`` of left and right hand side. .. note:: To achieve the same result as previous versions (compare values based on only left hand side index), you can use ``reindex_like``: - .. ipython:: python + .. code-block:: python s1 & s2.reindex_like(s1) **Current behavior** (``DataFrame``, no change): -.. ipython:: python +.. code-block:: python df1 = pd.DataFrame([True, False, True], index=list("ABC")) df2 = pd.DataFrame([True, True, True], index=list("ABD")) @@ -705,7 +703,7 @@ Flexible comparison methods ``Series`` flexible comparison methods like ``eq``, ``ne``, ``le``, ``lt``, ``ge`` and ``gt`` now align both ``index``. Use these operators if you want to compare two ``Series`` which has the different ``index``. -.. ipython:: python +.. code-block:: python s1 = pd.Series([1, 2, 3], index=["a", "b", "c"]) s2 = pd.Series([2, 2, 2], index=["b", "c", "d"]) @@ -722,8 +720,7 @@ Previously, this worked the same as comparison operators (see above). A ``Series`` will now correctly promote its dtype for assignment with incompat values to the current dtype (:issue:`13234`) -.. 
ipython:: python - :okwarning: +.. code-block:: python s = pd.Series() @@ -738,7 +735,7 @@ A ``Series`` will now correctly promote its dtype for assignment with incompat v **New behavior**: -.. ipython:: python +.. code-block:: python s["a"] = pd.Timestamp("2016-01-01") s["b"] = 3.0 @@ -763,7 +760,7 @@ Previously if ``.to_datetime()`` encountered mixed integers/floats and strings, This will now convert integers/floats with the default unit of ``ns``. -.. ipython:: python +.. code-block:: python pd.to_datetime([1, "foo"], errors="coerce") @@ -782,7 +779,7 @@ Merging changes Merging will now preserve the dtype of the join keys (:issue:`8596`) -.. ipython:: python +.. code-block:: python df1 = pd.DataFrame({"key": [1], "v1": [10]}) df1 @@ -810,7 +807,7 @@ Merging will now preserve the dtype of the join keys (:issue:`8596`) We are able to preserve the join keys -.. ipython:: python +.. code-block:: python pd.merge(df1, df2, how="outer") pd.merge(df1, df2, how="outer").dtypes @@ -818,7 +815,7 @@ We are able to preserve the join keys Of course if you have missing values that are introduced, then the resulting dtype will be upcast, which is unchanged from previous. -.. ipython:: python +.. code-block:: python pd.merge(df1, df2, how="outer", on="key") pd.merge(df1, df2, how="outer", on="key").dtypes @@ -830,7 +827,7 @@ Method ``.describe()`` changes Percentile identifiers in the index of a ``.describe()`` output will now be rounded to the least precision that keeps them distinct (:issue:`13104`) -.. ipython:: python +.. code-block:: python s = pd.Series([0, 1, 2, 3, 4]) df = pd.DataFrame([0, 1, 2, 3, 4]) @@ -864,7 +861,7 @@ The percentiles were rounded to at most one decimal place, which could raise ``V **New behavior**: -.. ipython:: python +.. 
code-block:: python s.describe(percentiles=[0.0001, 0.0005, 0.001, 0.999, 0.9995, 0.9999]) df.describe(percentiles=[0.0001, 0.0005, 0.001, 0.999, 0.9995, 0.9999]) @@ -903,7 +900,7 @@ As a consequence of this change, ``PeriodIndex`` no longer has an integer dtype: **New behavior**: -.. ipython:: python +.. code-block:: python pi = pd.PeriodIndex(["2016-08-01"], freq="D") pi @@ -930,7 +927,7 @@ Previously, ``Period`` has its own ``Period('NaT')`` representation different fr These result in ``pd.NaT`` without providing ``freq`` option. -.. ipython:: python +.. code-block:: python pd.Period("NaT") pd.Period(None) @@ -948,7 +945,7 @@ To be compatible with ``Period`` addition and subtraction, ``pd.NaT`` now suppor **New behavior**: -.. ipython:: python +.. code-block:: python pd.NaT + 1 pd.NaT - 1 @@ -969,7 +966,7 @@ of integers (:issue:`13988`). **New behavior**: -.. ipython:: python +.. code-block:: python pi = pd.PeriodIndex(["2011-01", "2011-02"], freq="M") pi.values @@ -999,7 +996,7 @@ Previous behavior: **New behavior**: the same operation will now perform element-wise addition: -.. ipython:: python +.. code-block:: python pd.Index(["a", "b"]) + pd.Index(["a", "c"]) @@ -1007,7 +1004,7 @@ Note that numeric Index objects already performed element-wise operations. For example, the behavior of adding two integer Indexes is unchanged. The base ``Index`` is now made consistent with this behavior. -.. ipython:: python +.. code-block:: python pd.Index([1, 2, 3]) + pd.Index([2, 3, 4]) @@ -1025,7 +1022,7 @@ DatetimeIndex objects resulting in a TimedeltaIndex: **New behavior**: -.. ipython:: python +.. code-block:: python ( pd.DatetimeIndex(["2016-01-01", "2016-01-02"]) @@ -1040,7 +1037,7 @@ DatetimeIndex objects resulting in a TimedeltaIndex: ``Index.difference`` and ``Index.symmetric_difference`` will now, more consistently, treat ``NaN`` values as any other values. (:issue:`13514`) -.. ipython:: python +.. 
code-block:: python idx1 = pd.Index([1, 2, 3, np.nan]) idx2 = pd.Index([0, 1, np.nan]) @@ -1057,7 +1054,7 @@ DatetimeIndex objects resulting in a TimedeltaIndex: **New behavior**: -.. ipython:: python +.. code-block:: python idx1.difference(idx2) idx1.symmetric_difference(idx2) @@ -1088,7 +1085,7 @@ Previously, most ``Index`` classes returned ``np.ndarray``, and ``DatetimeIndex` **New behavior**: -.. ipython:: python +.. code-block:: python pd.Index([1, 2, 3]).unique() pd.DatetimeIndex( @@ -1103,7 +1100,7 @@ Previously, most ``Index`` classes returned ``np.ndarray``, and ``DatetimeIndex` ``MultiIndex.from_arrays`` and ``MultiIndex.from_product`` will now preserve categorical dtype in ``MultiIndex`` levels (:issue:`13743`, :issue:`13854`). -.. ipython:: python +.. code-block:: python cat = pd.Categorical(["a", "b"], categories=list("bac")) lvl1 = ["foo", "bar"] @@ -1122,7 +1119,7 @@ in ``MultiIndex`` levels (:issue:`13743`, :issue:`13854`). **New behavior**: the single level is now a ``CategoricalIndex``: -.. ipython:: python +.. code-block:: python midx.levels[0] midx.get_level_values(0) @@ -1130,7 +1127,7 @@ in ``MultiIndex`` levels (:issue:`13743`, :issue:`13854`). An analogous change has been made to ``MultiIndex.from_product``. As a consequence, ``groupby`` and ``set_index`` also preserve categorical dtypes in indexes -.. ipython:: python +.. code-block:: python df = pd.DataFrame({"A": [0, 1], "B": [10, 11], "C": cat}) df_grouped = df.groupby(by=["A", "C"]).first() @@ -1160,7 +1157,7 @@ As a consequence, ``groupby`` and ``set_index`` also preserve categorical dtypes **New behavior**: -.. ipython:: python +.. code-block:: python df_grouped.index.levels[1] df_grouped.reset_index().dtypes @@ -1180,7 +1177,7 @@ from ``n`` for the second, and so on, so that, when concatenated, they are ident the result of calling :func:`read_csv` without the ``chunksize=`` argument (:issue:`12185`). -.. ipython:: python +.. 
code-block:: python data = "A,B\n0,1\n2,3\n4,5\n6,7" @@ -1198,7 +1195,7 @@ the result of calling :func:`read_csv` without the ``chunksize=`` argument **New behavior**: -.. ipython:: python +.. code-block:: python pd.concat(pd.read_csv(StringIO(data), chunksize=2)) @@ -1243,8 +1240,7 @@ Previously, sparse data were ``float64`` dtype by default, even if all inputs we As of v0.19.0, sparse data keeps the input dtype, and uses more appropriate ``fill_value`` defaults (``0`` for ``int64`` dtype, ``False`` for ``bool`` dtype). -.. ipython:: python - :okwarning: +.. code-block:: python pd.SparseArray([1, 2, 0, 0], dtype=np.int64) pd.SparseArray([True, False, False, False]) diff --git a/doc/source/whatsnew/v0.19.1.rst b/doc/source/whatsnew/v0.19.1.rst index 6ff3fb6900a99..693819d666a12 100644 --- a/doc/source/whatsnew/v0.19.1.rst +++ b/doc/source/whatsnew/v0.19.1.rst @@ -5,8 +5,7 @@ Version 0.19.1 (November 3, 2016) {{ header }} -.. ipython:: python - :suppress: +.. code-block:: python from pandas import * # noqa F401, F403 diff --git a/doc/source/whatsnew/v0.19.2.rst b/doc/source/whatsnew/v0.19.2.rst index bba89d78be869..a7b80605e6241 100644 --- a/doc/source/whatsnew/v0.19.2.rst +++ b/doc/source/whatsnew/v0.19.2.rst @@ -5,8 +5,7 @@ Version 0.19.2 (December 24, 2016) {{ header }} -.. ipython:: python - :suppress: +.. code-block:: python from pandas import * # noqa F401, F403 diff --git a/doc/source/whatsnew/v0.20.0.rst b/doc/source/whatsnew/v0.20.0.rst index 733995cc718dd..f9c47fd0c83d8 100644 --- a/doc/source/whatsnew/v0.20.0.rst +++ b/doc/source/whatsnew/v0.20.0.rst @@ -57,7 +57,7 @@ is :ref:`here <basics.aggregate>` (:issue:`1623`). Here is a sample -.. ipython:: python +.. code-block:: python df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'], index=pd.date_range('1/1/2000', periods=10)) @@ -68,13 +68,13 @@ One can operate using string function names, callables, lists, or dictionaries o Using a single function is equivalent to ``.apply``. -.. 
ipython:: python +.. code-block:: python df.agg('sum') Multiple aggregations with a list of functions. -.. ipython:: python +.. code-block:: python df.agg(['sum', 'min']) @@ -82,21 +82,20 @@ Using a dict provides the ability to apply specific aggregations per column. You will get a matrix-like output of all of the aggregators. The output has one column per unique function. Those functions applied to a particular column will be ``NaN``: -.. ipython:: python +.. code-block:: python df.agg({'A': ['sum', 'min'], 'B': ['min', 'max']}) The API also supports a ``.transform()`` function for broadcasting results. -.. ipython:: python - :okwarning: +.. code-block:: python df.transform(['abs', lambda x: x - x.min()]) When presented with mixed dtypes that cannot be aggregated, ``.agg()`` will only take the valid aggregations. This is similar to how groupby ``.agg()`` works. (:issue:`15015`) -.. ipython:: python +.. code-block:: python df = pd.DataFrame({'A': [1, 2, 3], 'B': [1., 2., 3.], @@ -104,7 +103,7 @@ aggregations. This is similar to how groupby ``.agg()`` works. (:issue:`15015`) 'D': pd.date_range('20130101', periods=3)}) df.dtypes -.. ipython:: python +.. code-block:: python df.agg(['min', 'sum']) @@ -116,12 +115,11 @@ Keyword argument ``dtype`` for data IO The ``'python'`` engine for :func:`read_csv`, as well as the :func:`read_fwf` function for parsing fixed-width text files and :func:`read_excel` for parsing Excel files, now accept the ``dtype`` keyword argument for specifying the types of specific columns (:issue:`14295`). See the :ref:`io docs <io.dtypes>` for more information. -.. ipython:: python - :suppress: +.. code-block:: python from io import StringIO -.. ipython:: python +.. code-block:: python data = "a b\n1 2\n3 4" pd.read_fwf(StringIO(data)).dtypes @@ -137,14 +135,14 @@ from where to compute the resulting timestamps when parsing numerical values wit For example, with 1960-01-01 as the starting date: -.. ipython:: python +.. 
code-block:: python pd.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01')) The default is set at ``origin='unix'``, which defaults to ``1970-01-01 00:00:00``, which is commonly called 'unix epoch' or POSIX time. This was the previous default, so this is a backward compatible change. -.. ipython:: python +.. code-block:: python pd.to_datetime([1, 2, 3], unit='D') @@ -156,7 +154,7 @@ GroupBy enhancements Strings passed to ``DataFrame.groupby()`` as the ``by`` parameter may now reference either column names or index level names. Previously, only column names could be referenced. This allows to easily group by a column and index level at the same time. (:issue:`5677`) -.. ipython:: python +.. code-block:: python arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']] @@ -183,7 +181,7 @@ Previously, only ``gzip`` compression was supported. By default, compression of URLs and paths are now inferred using their file extensions. Additionally, support for bz2 compression in the python 2 C-engine improved (:issue:`14874`). -.. ipython:: python +.. code-block:: python url = ('https://github.com/{repo}/raw/{branch}/{path}' .format(repo='pandas-dev/pandas', @@ -205,7 +203,7 @@ can now read from and write to compressed pickle files. Compression methods can be an explicit parameter or be inferred from the file extension. See :ref:`the docs here. <io.pickle.compression>` -.. ipython:: python +.. code-block:: python df = pd.DataFrame({'A': np.random.randn(1000), 'B': 'foo', @@ -213,7 +211,7 @@ See :ref:`the docs here. <io.pickle.compression>` Using an explicit compression type -.. ipython:: python +.. code-block:: python df.to_pickle("data.pkl.compress", compression="gzip") rt = pd.read_pickle("data.pkl.compress", compression="gzip") @@ -221,7 +219,7 @@ Using an explicit compression type The default is to infer the compression type from the extension (``compression='infer'``): -.. 
ipython:: python +.. code-block:: python df.to_pickle("data.pkl.gz") rt = pd.read_pickle("data.pkl.gz") @@ -230,8 +228,7 @@ The default is to infer the compression type from the extension (``compression=' rt = pd.read_pickle("s1.pkl.bz2") rt.head() -.. ipython:: python - :suppress: +.. code-block:: python import os os.remove("data.pkl.compress") @@ -248,7 +245,7 @@ or purely non-negative, integers. Previously, handling these integers would result in improper rounding or data-type casting, leading to incorrect results. Notably, a new numerical index, ``UInt64Index``, has been created (:issue:`14937`) -.. ipython:: python +.. code-block:: python idx = pd.UInt64Index([1, 2, 3]) df = pd.DataFrame({'A': ['a', 'b', 'c']}, index=idx) @@ -268,7 +265,7 @@ GroupBy on categoricals In previous versions, ``.groupby(..., sort=False)`` would fail with a ``ValueError`` when grouping on a categorical series with some categories not appearing in the data. (:issue:`13179`) -.. ipython:: python +.. code-block:: python chromosomes = np.r_[np.arange(1, 23).astype(str), ['X', 'Y']] df = pd.DataFrame({ @@ -290,7 +287,7 @@ In previous versions, ``.groupby(..., sort=False)`` would fail with a ``ValueErr **New behavior**: -.. ipython:: python +.. code-block:: python df[df.chromosomes != '1'].groupby('chromosomes', sort=False).sum() @@ -303,7 +300,7 @@ The new orient ``'table'`` for :meth:`DataFrame.to_json` will generate a `Table Schema`_ compatible string representation of the data. -.. ipython:: python +.. code-block:: python df = pd.DataFrame( {'A': [1, 2, 3], @@ -363,8 +360,7 @@ Experimental support has been added to export ``DataFrame.style`` formats to Exc For example, after running the following, ``styled.xlsx`` renders as below: -.. ipython:: python - :okwarning: +.. code-block:: python np.random.seed(24) df = pd.DataFrame({'A': np.linspace(1, 10, 10)}) @@ -380,8 +376,7 @@ For example, after running the following, ``styled.xlsx`` renders as below: .. 
image:: ../_static/style-excel.png -.. ipython:: python - :suppress: +.. code-block:: python import os os.remove('styled.xlsx') @@ -420,7 +415,7 @@ The returned categories were strings, representing Intervals New behavior: -.. ipython:: python +.. code-block:: python c = pd.cut(range(4), bins=2) c @@ -429,13 +424,13 @@ New behavior: Furthermore, this allows one to bin *other* data with these same bins, with ``NaN`` representing a missing value similar to other dtypes. -.. ipython:: python +.. code-block:: python pd.cut([0, 3, 5, 1], bins=c.categories) An ``IntervalIndex`` can also be used in ``Series`` and ``DataFrame`` as the index. -.. ipython:: python +.. code-block:: python df = pd.DataFrame({'A': range(4), 'B': pd.cut([0, 3, 1, 1], bins=c.categories) @@ -444,13 +439,13 @@ An ``IntervalIndex`` can also be used in ``Series`` and ``DataFrame`` as the ind Selecting via a specific interval: -.. ipython:: python +.. code-block:: python df.loc[pd.Interval(1.5, 3.0)] Selecting via a scalar value that is contained *in* the intervals. -.. ipython:: python +.. code-block:: python df.loc[0] @@ -572,7 +567,7 @@ Map on Index types now return other Index types ``map`` on an ``Index`` now returns an ``Index``, not a numpy array (:issue:`12766`) -.. ipython:: python +.. code-block:: python idx = pd.Index([1, 2]) idx @@ -597,7 +592,7 @@ Previous behavior: New behavior: -.. ipython:: python +.. code-block:: python idx.map(lambda x: x * 2) idx.map(lambda x: (x, x * 2)) @@ -609,7 +604,7 @@ New behavior: ``map`` on a ``Series`` with ``datetime64`` values may return ``int64`` dtypes rather than ``int32`` -.. ipython:: python +.. code-block:: python s = pd.Series(pd.date_range('2011-01-02T00:00', '2011-01-02T02:00', freq='H') .tz_localize('Asia/Tokyo')) @@ -628,7 +623,7 @@ Previous behavior: New behavior: -.. ipython:: python +.. code-block:: python s.map(lambda x: x.hour) @@ -654,7 +649,7 @@ Previous behaviour: New behavior: -.. ipython:: python +.. 
code-block:: python idx = pd.date_range("2015-01-01", periods=5, freq='10H') idx.hour @@ -698,7 +693,7 @@ data-types would yield different return types. These are now made consistent. (: New behavior: - .. ipython:: python + .. code-block:: python # Series, returns an array of Timestamp tz-aware pd.Series([pd.Timestamp(r'20160101', tz=r'US/Eastern'), @@ -728,7 +723,7 @@ data-types would yield different return types. These are now made consistent. (: New behavior: - .. ipython:: python + .. code-block:: python # returns a Categorical pd.Series(list('baabc'), dtype='category').unique() @@ -750,11 +745,12 @@ Partial string indexing changes :ref:`DatetimeIndex Partial String Indexing <timeseries.partialindexing>` now works as an exact match, provided that string resolution coincides with index resolution, including a case when both are seconds (:issue:`14826`). See :ref:`Slice vs. Exact Match <timeseries.slice_vs_exact_match>` for details. -.. ipython:: python +.. code-block:: python df = pd.DataFrame({'a': [1, 2, 3]}, pd.DatetimeIndex(['2011-12-31 23:59:59', '2012-01-01 00:00:00', '2012-01-01 00:00:01'])) + Previous behavior: .. code-block:: ipython @@ -788,7 +784,7 @@ Concat of different float dtypes will not automatically upcast Previously, ``concat`` of multiple objects with different ``float`` dtypes would automatically upcast results to a dtype of ``float64``. Now the smallest acceptable dtype will be used (:issue:`13247`) -.. ipython:: python +.. code-block:: python df1 = pd.DataFrame(np.array([1.0], dtype=np.float32, ndmin=2)) df1.dtypes @@ -807,7 +803,7 @@ Previous behavior: New behavior: -.. ipython:: python +.. code-block:: python pd.concat([df1, df2]).dtypes @@ -867,7 +863,7 @@ This would happen with a ``lexsorted``, but non-monotonic levels. (:issue:`15622 This is *unchanged* from prior versions, but shown for illustration purposes: -.. ipython:: python +.. 
code-block:: python df = pd.DataFrame(np.arange(6), columns=['value'], index=pd.MultiIndex.from_product([list('BA'), range(3)])) @@ -883,7 +879,7 @@ This is *unchanged* from prior versions, but shown for illustration purposes: Sorting works as expected -.. ipython:: python +.. code-block:: python df.sort_index() @@ -898,7 +894,7 @@ Sorting works as expected However, this example, which has a non-monotonic 2nd level, doesn't behave as desired. -.. ipython:: python +.. code-block:: python df = pd.DataFrame({'value': [1, 2, 3, 4]}, index=pd.MultiIndex([['a', 'b'], ['bb', 'aa']], @@ -989,7 +985,7 @@ Previous behavior: New behavior: -.. ipython:: python +.. code-block:: python df = pd.DataFrame({'A': [1, 1, 2, 2], 'B': [1, 2, 3, 4]}) @@ -1008,7 +1004,7 @@ see :ref:`here <whatsnew_0200.api_breaking.deprecate_panel>`. These are equivale but a MultiIndexed ``DataFrame`` enjoys more support in pandas. See the section on :ref:`Windowed Binary Operations <window.cov_corr>` for more information. (:issue:`15677`) -.. ipython:: python +.. code-block:: python np.random.seed(1234) df = pd.DataFrame(np.random.rand(100, 2), @@ -1031,14 +1027,14 @@ Previous behavior: New behavior: -.. ipython:: python +.. code-block:: python res = df.rolling(12).corr() res.tail() Retrieving a correlation matrix for a cross-section -.. ipython:: python +.. code-block:: python df.rolling(12).corr().loc['2016-04-07'] @@ -1051,7 +1047,7 @@ In previous versions most types could be compared to string column in a ``HDFSto usually resulting in an invalid comparison, returning an empty result frame. These comparisons will now raise a ``TypeError`` (:issue:`15492`) -.. ipython:: python +.. code-block:: python df = pd.DataFrame({'unparsed_date': ['2014-01-01', '2014-01-01']}) df.to_hdf('store.h5', 'key', format='table', data_columns=True) @@ -1077,8 +1073,7 @@ New behavior: TypeError: Cannot compare 2014-01-01 00:00:00 of type <class 'pandas.tslib.Timestamp'> to string column -.. 
ipython:: python - :suppress: +.. code-block:: python import os os.remove('store.h5') @@ -1094,7 +1089,7 @@ joins, :meth:`DataFrame.join` and :func:`merge`, and the ``.align`` method. - ``Index.intersection`` - .. ipython:: python + .. code-block:: python left = pd.Index([2, 1, 0]) left @@ -1110,13 +1105,13 @@ joins, :meth:`DataFrame.join` and :func:`merge`, and the ``.align`` method. New behavior: - .. ipython:: python + .. code-block:: python left.intersection(right) - ``DataFrame.join`` and ``pd.merge`` - .. ipython:: python + .. code-block:: python left = pd.DataFrame({'a': [20, 10, 0]}, index=[2, 1, 0]) left @@ -1135,7 +1130,7 @@ joins, :meth:`DataFrame.join` and :func:`merge`, and the ``.align`` method. New behavior: - .. ipython:: python + .. code-block:: python left.join(right, how='inner') @@ -1147,7 +1142,7 @@ Pivot table always returns a DataFrame The documentation for :meth:`pivot_table` states that a ``DataFrame`` is *always* returned. Here a bug is fixed that allowed this to return a ``Series`` under certain circumstance. (:issue:`4386`) -.. ipython:: python +.. code-block:: python df = pd.DataFrame({'col1': [3, 4, 5], 'col2': ['C', 'D', 'E'], @@ -1168,7 +1163,7 @@ Previous behavior: New behavior: -.. ipython:: python +.. code-block:: python df.pivot_table('col1', index=['col3', 'col2'], aggfunc=np.sum) @@ -1336,7 +1331,7 @@ The recommended methods of indexing are: Using ``.ix`` will now show a ``DeprecationWarning`` with a link to some examples of how to convert code `here <https://pandas.pydata.org/pandas-docs/version/1.0/user_guide/indexing.html#ix-indexer-is-deprecated>`__. -.. ipython:: python +.. code-block:: python df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, @@ -1356,13 +1351,13 @@ Previous behavior, where you wish to get the 0th and the 2nd elements from the i Using ``.loc``. Here we will select the appropriate indexes from the index, then use *label* indexing. -.. ipython:: python +.. 
code-block:: python df.loc[df.index[[0, 2]], 'A'] Using ``.iloc``. Here we will get the location of the 'A' column, then use *positional* indexing to select things. -.. ipython:: python +.. code-block:: python df.iloc[[0, 2], df.columns.get_loc('A')] @@ -1455,7 +1450,7 @@ between ``Series`` and ``DataFrame``. We are deprecating this 'renaming' functio This is an illustrative example: -.. ipython:: python +.. code-block:: python df = pd.DataFrame({'A': [1, 1, 1, 2, 2], 'B': range(5), @@ -1466,7 +1461,7 @@ Here is a typical useful syntax for computing different aggregations for differe is a natural, and useful syntax. We aggregate from the dict-to-list by taking the specified columns and applying the list of functions. This returns a ``MultiIndex`` for the columns (this is *not* deprecated). -.. ipython:: python +.. code-block:: python df.groupby('A').agg({'B': 'sum', 'C': 'min'}) @@ -1487,7 +1482,7 @@ is a combination aggregation & renaming: You can accomplish the same operation, more idiomatically by: -.. ipython:: python +.. code-block:: python df.groupby('A').B.agg(['count']).rename(columns={'count': 'foo'}) @@ -1512,7 +1507,7 @@ Here's an example of the second deprecation, passing a dict-of-dict to a grouped You can accomplish nearly the same by: -.. ipython:: python +.. code-block:: python (df.groupby('A') .agg({'B': 'sum', 'C': 'min'}) diff --git a/doc/source/whatsnew/v0.20.2.rst b/doc/source/whatsnew/v0.20.2.rst index 430a39d2d2e97..fd27887e3dd91 100644 --- a/doc/source/whatsnew/v0.20.2.rst +++ b/doc/source/whatsnew/v0.20.2.rst @@ -5,8 +5,7 @@ Version 0.20.2 (June 4, 2017) {{ header }} -.. ipython:: python - :suppress: +.. code-block:: python from pandas import * # noqa F401, F403 diff --git a/doc/source/whatsnew/v0.20.3.rst b/doc/source/whatsnew/v0.20.3.rst index ff28f6830783e..647da61051ed8 100644 --- a/doc/source/whatsnew/v0.20.3.rst +++ b/doc/source/whatsnew/v0.20.3.rst @@ -5,8 +5,7 @@ Version 0.20.3 (July 7, 2017) {{ header }} -.. 
ipython:: python - :suppress: +.. code-block:: python from pandas import * # noqa F401, F403 diff --git a/doc/source/whatsnew/v0.5.0.rst b/doc/source/whatsnew/v0.5.0.rst index 7447a10fa1d6b..7b2af577a24d5 100644 --- a/doc/source/whatsnew/v0.5.0.rst +++ b/doc/source/whatsnew/v0.5.0.rst @@ -6,8 +6,7 @@ Version 0.5.0 (October 24, 2011) {{ header }} -.. ipython:: python - :suppress: +.. code-block:: python from pandas import * # noqa F401, F403 diff --git a/doc/source/whatsnew/v0.6.0.rst b/doc/source/whatsnew/v0.6.0.rst index 253ca4d4188e5..a3c28a23e07b4 100644 --- a/doc/source/whatsnew/v0.6.0.rst +++ b/doc/source/whatsnew/v0.6.0.rst @@ -5,8 +5,7 @@ Version 0.6.0 (November 25, 2011) {{ header }} -.. ipython:: python - :suppress: +.. code-block:: python from pandas import * # noqa F401, F403 diff --git a/doc/source/whatsnew/v0.7.0.rst b/doc/source/whatsnew/v0.7.0.rst index 2fe686d8858a2..71aeb2070e8d4 100644 --- a/doc/source/whatsnew/v0.7.0.rst +++ b/doc/source/whatsnew/v0.7.0.rst @@ -31,7 +31,7 @@ New features - Handle differently-indexed output values in ``DataFrame.apply`` (:issue:`498`) -.. ipython:: python +.. code-block:: python df = pd.DataFrame(np.random.randn(10, 4)) df.apply(lambda x: x.describe()) @@ -116,7 +116,7 @@ One of the potentially riskiest API changes in 0.7.0, but also one of the most important, was a complete review of how **integer indexes** are handled with regard to label-based indexing. Here is an example: -.. ipython:: python +.. code-block:: python s = pd.Series(np.random.randn(10), index=range(0, 20, 2)) s @@ -235,7 +235,7 @@ slice to a Series when getting and setting values via ``[]`` (i.e. the ``__getitem__`` and ``__setitem__`` methods). The behavior will be the same as passing similar input to ``ix`` **except in the case of integer indexing**: -.. ipython:: python +.. 
code-block:: python s = pd.Series(np.random.randn(6), index=list('acegkm')) s @@ -246,7 +246,7 @@ passing similar input to ``ix`` **except in the case of integer indexing**: In the case of integer indexes, the behavior will be exactly as before (shadowing ``ndarray``): -.. ipython:: python +.. code-block:: python s = pd.Series(np.random.randn(6), index=range(0, 12, 2)) s[[4, 0, 2]] diff --git a/doc/source/whatsnew/v0.7.3.rst b/doc/source/whatsnew/v0.7.3.rst index 4ca31baf560bb..b52581c53d54b 100644 --- a/doc/source/whatsnew/v0.7.3.rst +++ b/doc/source/whatsnew/v0.7.3.rst @@ -51,7 +51,7 @@ NA boolean comparison API change Reverted some changes to how NA values (represented typically as ``NaN`` or ``None``) are handled in non-numeric Series: -.. ipython:: python +.. code-block:: python series = pd.Series(["Steve", np.nan, "Joe"]) series == "Steve" @@ -62,7 +62,7 @@ In comparisons, NA / NaN will always come through as ``False`` except with negation, in the presence of NA data. You may wish to add an explicit NA filter into boolean array operations if you are worried about this: -.. ipython:: python +.. code-block:: python mask = series == "Steve" series[mask & series.notnull()] @@ -80,8 +80,7 @@ Other API changes When calling ``apply`` on a grouped Series, the return value will also be a Series, to be more consistent with the ``groupby`` behavior with DataFrame: -.. ipython:: python - :okwarning: +.. code-block:: python df = pd.DataFrame( { diff --git a/doc/source/whatsnew/v0.8.0.rst b/doc/source/whatsnew/v0.8.0.rst index 490175914cef1..0ae7347f5ded2 100644 --- a/doc/source/whatsnew/v0.8.0.rst +++ b/doc/source/whatsnew/v0.8.0.rst @@ -204,7 +204,7 @@ have code that converts ``DateRange`` or ``Index`` objects that used to contain ``datetime.datetime`` values to plain NumPy arrays, you may have bugs lurking with code using scalar values because you are handing control over to NumPy: -.. ipython:: python +.. 
code-block:: python import datetime @@ -225,7 +225,7 @@ If you have code that requires an array of ``datetime.datetime`` objects, you have a couple of options. First, the ``astype(object)`` method of ``DatetimeIndex`` produces an array of ``Timestamp`` objects: -.. ipython:: python +.. code-block:: python stamp_array = rng.astype(object) stamp_array @@ -234,7 +234,7 @@ produces an array of ``Timestamp`` objects: To get an array of proper ``datetime.datetime`` objects, use the ``to_pydatetime`` method: -.. ipython:: python +.. code-block:: python dt_array = rng.to_pydatetime() dt_array @@ -252,7 +252,7 @@ type. See `matplotlib documentation in NumPy 1.6. In particular, the string version of the array shows garbage values, and conversion to ``dtype=object`` is similarly broken. - .. ipython:: python + .. code-block:: python rng = pd.date_range("1/1/2000", periods=10) rng diff --git a/doc/source/whatsnew/v0.9.0.rst b/doc/source/whatsnew/v0.9.0.rst index 44ded51e31fda..d7e5698f1868e 100644 --- a/doc/source/whatsnew/v0.9.0.rst +++ b/doc/source/whatsnew/v0.9.0.rst @@ -37,7 +37,7 @@ API changes functions like ``read_csv`` has changed to be more Pythonic and amenable to attribute access: -.. ipython:: python +.. code-block:: python import io @@ -56,7 +56,7 @@ API changes "by accident" (this was never intended) will lead to all NA Series in some cases. To be perfectly clear: -.. ipython:: python +.. code-block:: python s1 = pd.Series([1, 2, 3]) s1 diff --git a/doc/source/whatsnew/v0.9.1.rst b/doc/source/whatsnew/v0.9.1.rst index 6b05e5bcded7e..03a2e3f408f56 100644 --- a/doc/source/whatsnew/v0.9.1.rst +++ b/doc/source/whatsnew/v0.9.1.rst @@ -38,7 +38,7 @@ New features ``na_option`` parameter so missing values can be assigned either the largest or the smallest rank (:issue:`1508`, :issue:`2159`) - .. ipython:: python + .. 
code-block:: python df = pd.DataFrame(np.random.randn(6, 3), columns=['A', 'B', 'C']) @@ -57,7 +57,7 @@ New features DataFrame currently supports slicing via a boolean vector the same length as the DataFrame (inside the ``[]``). The returned DataFrame has the same number of columns as the original, but is sliced on its index. - .. ipython:: python + .. code-block:: python df = DataFrame(np.random.randn(5, 3), columns = ['A','B','C']) @@ -70,7 +70,7 @@ New features elements that do not meet the boolean condition as ``NaN``. This is accomplished via the new method ``DataFrame.where``. In addition, ``where`` takes an optional ``other`` argument for replacement. - .. ipython:: python + .. code-block:: python df[df>0] @@ -81,7 +81,7 @@ New features Furthermore, ``where`` now aligns the input boolean condition (ndarray or DataFrame), such that partial selection with setting is possible. This is analogous to partial setting via ``.ix`` (but on the contents rather than the axis labels) - .. ipython:: python + .. code-block:: python df2 = df.copy() df2[ df2[1:4] > 0 ] = 3 @@ -89,13 +89,13 @@ New features ``DataFrame.mask`` is the inverse boolean operation of ``where``. - .. ipython:: python + .. code-block:: python df.mask(df<=0) - Enable referencing of Excel columns by their column names (:issue:`1936`) - .. ipython:: python + .. code-block:: python xl = pd.ExcelFile('data/test.xls') xl.parse('Sheet1', index_col=0, parse_dates=True, @@ -137,7 +137,7 @@ API changes - Period.end_time now returns the last nanosecond in the time interval (:issue:`2124`, :issue:`2125`, :issue:`1764`) - .. ipython:: python + .. code-block:: python p = pd.Period('2012') @@ -147,7 +147,7 @@ API changes - File parsers no longer coerce to float or bool for columns that have custom converters specified (:issue:`2184`) - .. ipython:: python + .. code-block:: python import io
xref #6856
https://api.github.com/repos/pandas-dev/pandas/pulls/41442
2021-05-12T18:53:48Z
2021-05-14T17:19:59Z
null
2022-11-18T02:21:38Z
BUG: Series.str.extract with StringArray returning object dtype
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 84f9dae8a0850..73a6360c361db 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -750,6 +750,7 @@ Strings - Bug in the conversion from ``pyarrow.ChunkedArray`` to :class:`~arrays.StringArray` when the original had zero chunks (:issue:`41040`) - Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` ignoring replacements with ``regex=True`` for ``StringDType`` data (:issue:`41333`, :issue:`35977`) +- Bug in :meth:`Series.str.extract` with :class:`~arrays.StringArray` returning object dtype for empty :class:`DataFrame` (:issue:`41441`) Interval ^^^^^^^^ diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index 2646ddfa45b58..025ec232adcb5 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -3108,17 +3108,16 @@ def _str_extract_noexpand(arr, pat, flags=0): # error: Incompatible types in assignment (expression has type # "DataFrame", variable has type "ndarray") result = DataFrame( # type: ignore[assignment] - columns=columns, dtype=object + columns=columns, dtype=result_dtype ) else: - dtype = _result_dtype(arr) # error: Incompatible types in assignment (expression has type # "DataFrame", variable has type "ndarray") result = DataFrame( # type:ignore[assignment] [groups_or_na(val) for val in arr], columns=columns, index=arr.index, - dtype=dtype, + dtype=result_dtype, ) return result, name @@ -3135,19 +3134,19 @@ def _str_extract_frame(arr, pat, flags=0): regex = re.compile(pat, flags=flags) groups_or_na = _groups_or_na_fun(regex) columns = _get_group_names(regex) + result_dtype = _result_dtype(arr) if len(arr) == 0: - return DataFrame(columns=columns, dtype=object) + return DataFrame(columns=columns, dtype=result_dtype) try: result_index = arr.index except AttributeError: result_index = None - dtype = _result_dtype(arr) return DataFrame( [groups_or_na(val) for val in arr], columns=columns, 
index=result_index, - dtype=dtype, + dtype=result_dtype, ) diff --git a/pandas/tests/strings/test_strings.py b/pandas/tests/strings/test_strings.py index 5d8a63fe481f8..a18d54b4de44d 100644 --- a/pandas/tests/strings/test_strings.py +++ b/pandas/tests/strings/test_strings.py @@ -175,17 +175,19 @@ def test_empty_str_methods(any_string_dtype): tm.assert_series_equal(empty_str, empty.str.repeat(3)) tm.assert_series_equal(empty_bool, empty.str.match("^a")) tm.assert_frame_equal( - DataFrame(columns=[0], dtype=str), empty.str.extract("()", expand=True) + DataFrame(columns=[0], dtype=any_string_dtype), + empty.str.extract("()", expand=True), ) tm.assert_frame_equal( - DataFrame(columns=[0, 1], dtype=str), empty.str.extract("()()", expand=True) + DataFrame(columns=[0, 1], dtype=any_string_dtype), + empty.str.extract("()()", expand=True), ) tm.assert_series_equal(empty_str, empty.str.extract("()", expand=False)) tm.assert_frame_equal( - DataFrame(columns=[0, 1], dtype=str), + DataFrame(columns=[0, 1], dtype=any_string_dtype), empty.str.extract("()()", expand=False), ) - tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies()) + tm.assert_frame_equal(DataFrame(), empty.str.get_dummies()) tm.assert_series_equal(empty_str, empty_str.str.join("")) tm.assert_series_equal(empty_int, empty.str.len()) tm.assert_series_equal(empty_object, empty_str.str.findall("a"))
test changes needed in #41372 revealed an issue with the current implementation. fixing for completeness and to add release note. the dtype for an empty DataFrame from .str.get_dummies looks iffy so marking this as draft until further investigation.
https://api.github.com/repos/pandas-dev/pandas/pulls/41441
2021-05-12T17:47:15Z
2021-05-14T00:26:37Z
2021-05-14T00:26:36Z
2021-05-14T09:14:28Z
BUG: convert dtypes copies column_index names
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 0d39f13afc426..3cc378be55a80 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6179,7 +6179,7 @@ def convert_dtypes( for col_name, col in self.items() ] if len(results) > 0: - return concat(results, axis=1, copy=False) + return concat(results, axis=1, copy=False, names=self.columns.names) else: return self.copy() diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index b3b453ea6355a..23934f9313dfa 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -498,6 +498,8 @@ def get_result(self): index, columns = self.new_axes df = cons(data, index=index) df.columns = columns + if self.names: + df.columns.names = self.names return df.__finalize__(self, method="concat") # combine block managers diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index 771d31aa6865b..1c70dc12c122f 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -501,3 +501,10 @@ def test_slice_shift_deprecated(self, frame_or_series): with tm.assert_produces_warning(FutureWarning): obj.slice_shift() + + def test_convert_dtypes_name(self): + # GH 41435 + df = DataFrame({"a": [1, 2], "b": [3, 4]}) + df.columns.name = "cols" + result = df.convert_dtypes() + assert result.columns.names == df.columns.names
- [x] closes #41435 - [x] tests added / passed tests may be in wrong place. solution may be the easiest, but it maybe not the best or most robust.
https://api.github.com/repos/pandas-dev/pandas/pulls/41440
2021-05-12T16:07:34Z
2021-05-13T13:22:03Z
null
2022-03-06T07:34:27Z
TST: Added regression test
diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py index e46eed05caa86..2aea2cc9b37cd 100644 --- a/pandas/tests/indexing/test_datetime.py +++ b/pandas/tests/indexing/test_datetime.py @@ -10,6 +10,21 @@ class TestDatetimeIndex: + def test_datetimeindex_transpose_empty_df(self): + """ + Regression test for: + https://github.com/pandas-dev/pandas/issues/41382 + """ + df = DataFrame(index=pd.DatetimeIndex([])) + + expected = pd.DatetimeIndex([], dtype="datetime64[ns]", freq=None) + + result1 = df.T.sum().index + result2 = df.sum(axis=1).index + + tm.assert_index_equal(result1, expected) + tm.assert_index_equal(result2, expected) + def test_indexing_with_datetime_tz(self): # GH#8260
- [x] closes #41382 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/41436
2021-05-12T12:45:38Z
2021-05-13T18:47:29Z
2021-05-13T18:47:29Z
2021-05-19T12:11:26Z
TYP: define `subset` arg in `Styler`
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 8fc2825ffcfc5..a96196a698f43 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -43,6 +43,7 @@ CSSProperties, CSSStyles, StylerRenderer, + Subset, Tooltips, maybe_convert_css_to_tuples, non_reducing_slice, @@ -545,7 +546,7 @@ def _apply( self, func: Callable[..., Styler], axis: Axis | None = 0, - subset=None, + subset: Subset | None = None, **kwargs, ) -> Styler: subset = slice(None) if subset is None else subset @@ -590,7 +591,7 @@ def apply( self, func: Callable[..., Styler], axis: Axis | None = 0, - subset=None, + subset: Subset | None = None, **kwargs, ) -> Styler: """ @@ -651,7 +652,9 @@ def apply( ) return self - def _applymap(self, func: Callable, subset=None, **kwargs) -> Styler: + def _applymap( + self, func: Callable, subset: Subset | None = None, **kwargs + ) -> Styler: func = partial(func, **kwargs) # applymap doesn't take kwargs? if subset is None: subset = pd.IndexSlice[:] @@ -660,7 +663,9 @@ def _applymap(self, func: Callable, subset=None, **kwargs) -> Styler: self._update_ctx(result) return self - def applymap(self, func: Callable, subset=None, **kwargs) -> Styler: + def applymap( + self, func: Callable, subset: Subset | None = None, **kwargs + ) -> Styler: """ Apply a CSS-styling function elementwise. @@ -707,7 +712,7 @@ def where( cond: Callable, value: str, other: str | None = None, - subset=None, + subset: Subset | None = None, **kwargs, ) -> Styler: """ @@ -1061,7 +1066,7 @@ def hide_index(self) -> Styler: self.hidden_index = True return self - def hide_columns(self, subset) -> Styler: + def hide_columns(self, subset: Subset) -> Styler: """ Hide columns from rendering. 
@@ -1093,7 +1098,7 @@ def background_gradient( low: float = 0, high: float = 0, axis: Axis | None = 0, - subset=None, + subset: Subset | None = None, text_color_threshold: float = 0.408, vmin: float | None = None, vmax: float | None = None, @@ -1239,7 +1244,7 @@ def background_gradient( ) return self - def set_properties(self, subset=None, **kwargs) -> Styler: + def set_properties(self, subset: Subset | None = None, **kwargs) -> Styler: """ Set defined CSS-properties to each ``<td>`` HTML element within the given subset. @@ -1331,7 +1336,7 @@ def css(x): def bar( self, - subset=None, + subset: Subset | None = None, axis: Axis | None = 0, color="#d65f5f", width: float = 100, @@ -1417,7 +1422,7 @@ def bar( def highlight_null( self, null_color: str = "red", - subset: IndexLabel | None = None, + subset: Subset | None = None, props: str | None = None, ) -> Styler: """ @@ -1462,7 +1467,7 @@ def f(data: DataFrame, props: str) -> np.ndarray: def highlight_max( self, - subset: IndexLabel | None = None, + subset: Subset | None = None, color: str = "yellow", axis: Axis | None = 0, props: str | None = None, @@ -1511,7 +1516,7 @@ def f(data: FrameOrSeries, props: str) -> np.ndarray: def highlight_min( self, - subset: IndexLabel | None = None, + subset: Subset | None = None, color: str = "yellow", axis: Axis | None = 0, props: str | None = None, @@ -1560,7 +1565,7 @@ def f(data: FrameOrSeries, props: str) -> np.ndarray: def highlight_between( self, - subset: IndexLabel | None = None, + subset: Subset | None = None, color: str = "yellow", axis: Axis | None = 0, left: Scalar | Sequence | None = None, @@ -1667,7 +1672,7 @@ def highlight_between( def highlight_quantile( self, - subset: IndexLabel | None = None, + subset: Subset | None = None, color: str = "yellow", axis: Axis | None = 0, q_left: float = 0.0, diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py index 6917daaede2c6..6f7d298c7dec0 100644 --- a/pandas/io/formats/style_render.py +++ 
b/pandas/io/formats/style_render.py @@ -55,6 +55,7 @@ class CSSDict(TypedDict): CSSStyles = List[CSSDict] +Subset = Union[slice, Sequence, Index] class StylerRenderer: @@ -402,7 +403,7 @@ def _translate_body( def format( self, formatter: ExtFormatter | None = None, - subset: slice | Sequence[Any] | None = None, + subset: Subset | None = None, na_rep: str | None = None, precision: int | None = None, decimal: str = ".", @@ -772,7 +773,7 @@ def _maybe_wrap_formatter( return lambda x: na_rep if isna(x) else func_2(x) -def non_reducing_slice(slice_): +def non_reducing_slice(slice_: Subset): """ Ensure that a slice doesn't reduce to a Series or Scalar. @@ -809,7 +810,9 @@ def pred(part) -> bool: # slice(a, b, c) slice_ = [slice_] # to tuplize later else: - slice_ = [part if pred(part) else [part] for part in slice_] + # error: Item "slice" of "Union[slice, Sequence[Any]]" has no attribute + # "__iter__" (not iterable) -> is specifically list_like in conditional + slice_ = [p if pred(p) else [p] for p in slice_] # type: ignore[union-attr] return tuple(slice_)
This works but someone with more knowledge might be able to point out a better definition of the type for `Subset`, i.e. here `Subset = Union[slice, Sequence, Index]` It types all the `subset` args in `Styler`
https://api.github.com/repos/pandas-dev/pandas/pulls/41433
2021-05-12T09:34:33Z
2021-05-17T17:15:59Z
2021-05-17T17:15:59Z
2021-05-22T15:09:03Z
TST: Add test for old issues
diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py index a96e5b07b7f7e..b29855caf6c1d 100644 --- a/pandas/tests/arrays/sparse/test_array.py +++ b/pandas/tests/arrays/sparse/test_array.py @@ -1313,6 +1313,14 @@ def test_dropna(fill_value): tm.assert_equal(df.dropna(), expected_df) +def test_drop_duplicates_fill_value(): + # GH 11726 + df = pd.DataFrame(np.zeros((5, 5))).apply(lambda x: SparseArray(x, fill_value=0)) + result = df.drop_duplicates() + expected = pd.DataFrame({i: SparseArray([0.0], fill_value=0) for i in range(5)}) + tm.assert_frame_equal(result, expected) + + class TestMinMax: plain_data = np.arange(5).astype(float) data_neg = plain_data * (-1) diff --git a/pandas/tests/frame/methods/test_to_dict.py b/pandas/tests/frame/methods/test_to_dict.py index 6d0d4e045e491..022b0f273493b 100644 --- a/pandas/tests/frame/methods/test_to_dict.py +++ b/pandas/tests/frame/methods/test_to_dict.py @@ -304,3 +304,10 @@ def test_to_dict_scalar_constructor_orient_dtype(self, data, expected_dtype): d = df.to_dict(orient="records") result = type(d[0]["a"]) assert result is expected_dtype + + def test_to_dict_mixed_numeric_frame(self): + # GH 12859 + df = DataFrame({"a": [1.0], "b": [9.0]}) + result = df.reset_index().to_dict("records") + expected = [{"index": 0, "a": 1.0, "b": 9.0}] + assert result == expected diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index 117612696df11..2f87f4a19b93f 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -1121,3 +1121,27 @@ def test_apply_dropna_with_indexed_same(): ) tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "as_index, expected", + [ + [ + False, + DataFrame( + [[1, 1, 1], [2, 2, 1]], columns=Index(["a", "b", None], dtype=object) + ), + ], + [ + True, + Series( + [1, 1], index=MultiIndex.from_tuples([(1, 1), (2, 2)], names=["a", "b"]) + ), + ], + ], +) +def 
test_apply_as_index_constant_lambda(as_index, expected): + # GH 13217 + df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 1, 2, 2], "c": [1, 1, 1, 1]}) + result = df.groupby(["a", "b"], as_index=as_index).apply(lambda x: 1) + tm.assert_equal(result, expected) diff --git a/pandas/tests/groupby/test_apply_mutate.py b/pandas/tests/groupby/test_apply_mutate.py index 529f76bf692ce..05c1f5b716f40 100644 --- a/pandas/tests/groupby/test_apply_mutate.py +++ b/pandas/tests/groupby/test_apply_mutate.py @@ -68,3 +68,63 @@ def fn(x): name="col2", ) tm.assert_series_equal(result, expected) + + +def test_apply_mutate_columns_multiindex(): + # GH 12652 + df = pd.DataFrame( + { + ("C", "julian"): [1, 2, 3], + ("B", "geoffrey"): [1, 2, 3], + ("A", "julian"): [1, 2, 3], + ("B", "julian"): [1, 2, 3], + ("A", "geoffrey"): [1, 2, 3], + ("C", "geoffrey"): [1, 2, 3], + }, + columns=pd.MultiIndex.from_tuples( + [ + ("A", "julian"), + ("A", "geoffrey"), + ("B", "julian"), + ("B", "geoffrey"), + ("C", "julian"), + ("C", "geoffrey"), + ] + ), + ) + + def add_column(grouped): + name = grouped.columns[0][1] + grouped["sum", name] = grouped.sum(axis=1) + return grouped + + result = df.groupby(level=1, axis=1).apply(add_column) + expected = pd.DataFrame( + [ + [1, 1, 1, 3, 1, 1, 1, 3], + [2, 2, 2, 6, 2, 2, 2, 6], + [ + 3, + 3, + 3, + 9, + 3, + 3, + 3, + 9, + ], + ], + columns=pd.MultiIndex.from_tuples( + [ + ("geoffrey", "A", "geoffrey"), + ("geoffrey", "B", "geoffrey"), + ("geoffrey", "C", "geoffrey"), + ("geoffrey", "sum", "geoffrey"), + ("julian", "A", "julian"), + ("julian", "B", "julian"), + ("julian", "C", "julian"), + ("julian", "sum", "julian"), + ] + ), + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/indexes/object/__init__.py b/pandas/tests/indexes/object/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index b5822b768fdde..47657fff56ceb 100644 --- 
a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1366,7 +1366,7 @@ async def test_tab_complete_warning(self, ip): pytest.importorskip("IPython", minversion="6.0.0") from IPython.core.completer import provisionalcompleter - code = "import pandas as pd; idx = Index([1, 2])" + code = "import pandas as pd; idx = pd.Index([1, 2])" await ip.run_code(code) # GH 31324 newer jedi version raises Deprecation warning; @@ -1720,3 +1720,21 @@ def test_validate_1d_input(): ser = Series(0, range(4)) with pytest.raises(ValueError, match=msg): ser.index = np.array([[2, 3]] * 4) + + +@pytest.mark.parametrize( + "klass, extra_kwargs", + [ + [Index, {}], + [Int64Index, {}], + [Float64Index, {}], + [DatetimeIndex, {}], + [TimedeltaIndex, {}], + [PeriodIndex, {"freq": "Y"}], + ], +) +def test_construct_from_memoryview(klass, extra_kwargs): + # GH 13120 + result = klass(memoryview(np.arange(2000, 2005)), **extra_kwargs) + expected = klass(range(2000, 2005), **extra_kwargs) + tm.assert_index_equal(result, expected) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 1495a34274a94..edd100219143c 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -2446,3 +2446,14 @@ def test_merge_duplicate_columns_with_suffix_causing_another_duplicate(): result = merge(left, right, on="a") expected = DataFrame([[1, 1, 1, 1, 2]], columns=["a", "b_x", "b_x", "b_x", "b_y"]) tm.assert_frame_equal(result, expected) + + +def test_merge_string_float_column_result(): + # GH 13353 + df1 = DataFrame([[1, 2], [3, 4]], columns=pd.Index(["a", 114.0])) + df2 = DataFrame([[9, 10], [11, 12]], columns=["x", "y"]) + result = merge(df2, df1, how="inner", left_index=True, right_index=True) + expected = DataFrame( + [[9, 10, 1, 2], [11, 12, 3, 4]], columns=pd.Index(["x", "y", "a", 114.0]) + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/indexing/test_getitem.py 
b/pandas/tests/series/indexing/test_getitem.py index 0e43e351bc082..8793026ee74ab 100644 --- a/pandas/tests/series/indexing/test_getitem.py +++ b/pandas/tests/series/indexing/test_getitem.py @@ -662,3 +662,11 @@ def test_getitem_categorical_str(): def test_slice_can_reorder_not_uniquely_indexed(): ser = Series(1, index=["a", "a", "b", "b", "c"]) ser[::-1] # it works! + + +@pytest.mark.parametrize("index_vals", ["aabcd", "aadcb"]) +def test_duplicated_index_getitem_positional_indexer(index_vals): + # GH 11747 + s = Series(range(5), index=list(index_vals)) + result = s[3] + assert result == 3 diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 675120e03d821..3f850dfbc6a39 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -286,6 +286,13 @@ def test_setitem_with_bool_mask_and_values_matching_n_trues_in_length(self): expected = Series([None] * 3 + list(range(5)) + [None] * 2).astype("object") tm.assert_series_equal(result, expected) + def test_setitem_nan_with_bool(self): + # GH 13034 + result = Series([True, False, True]) + result[0] = np.nan + expected = Series([np.nan, False, True], dtype=object) + tm.assert_series_equal(result, expected) + class TestSetitemViewCopySemantics: def test_setitem_invalidates_datetime_index_freq(self):
- [x] closes #11747 - [x] closes #11726 - [x] closes #12652 - [x] closes #12859 - [x] closes #13034 - [x] closes #13120 - [x] closes #13217 - [x] closes #13353 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/41431
2021-05-12T04:43:22Z
2021-05-12T13:27:56Z
2021-05-12T13:27:55Z
2021-05-12T16:32:33Z
CLN: Create tests/window/moments/conftest for specific fixtures
diff --git a/pandas/tests/window/conftest.py b/pandas/tests/window/conftest.py index b1f1bb7086149..24b28356a3099 100644 --- a/pandas/tests/window/conftest.py +++ b/pandas/tests/window/conftest.py @@ -1,18 +1,11 @@ -from datetime import ( - datetime, - timedelta, -) +from datetime import timedelta -import numpy as np import pytest import pandas.util._test_decorators as td from pandas import ( DataFrame, - Series, - bdate_range, - notna, to_datetime, ) @@ -141,168 +134,6 @@ def engine_and_raw(request): return request.param -# create the data only once as we are not setting it -def _create_consistency_data(): - def create_series(): - return [ - Series(dtype=object), - Series([np.nan]), - Series([np.nan, np.nan]), - Series([3.0]), - Series([np.nan, 3.0]), - Series([3.0, np.nan]), - Series([1.0, 3.0]), - Series([2.0, 2.0]), - Series([3.0, 1.0]), - Series( - [5.0, 5.0, 5.0, 5.0, np.nan, np.nan, np.nan, 5.0, 5.0, np.nan, np.nan] - ), - Series( - [ - np.nan, - 5.0, - 5.0, - 5.0, - np.nan, - np.nan, - np.nan, - 5.0, - 5.0, - np.nan, - np.nan, - ] - ), - Series( - [ - np.nan, - np.nan, - 5.0, - 5.0, - np.nan, - np.nan, - np.nan, - 5.0, - 5.0, - np.nan, - np.nan, - ] - ), - Series( - [ - np.nan, - 3.0, - np.nan, - 3.0, - 4.0, - 5.0, - 6.0, - np.nan, - np.nan, - 7.0, - 12.0, - 13.0, - 14.0, - 15.0, - ] - ), - Series( - [ - np.nan, - 5.0, - np.nan, - 2.0, - 4.0, - 0.0, - 9.0, - np.nan, - np.nan, - 3.0, - 12.0, - 13.0, - 14.0, - 15.0, - ] - ), - Series( - [ - 2.0, - 3.0, - np.nan, - 3.0, - 4.0, - 5.0, - 6.0, - np.nan, - np.nan, - 7.0, - 12.0, - 13.0, - 14.0, - 15.0, - ] - ), - Series( - [ - 2.0, - 5.0, - np.nan, - 2.0, - 4.0, - 0.0, - 9.0, - np.nan, - np.nan, - 3.0, - 12.0, - 13.0, - 14.0, - 15.0, - ] - ), - Series(range(10)), - Series(range(20, 0, -2)), - ] - - def create_dataframes(): - return [ - DataFrame(), - DataFrame(columns=["a"]), - DataFrame(columns=["a", "a"]), - DataFrame(columns=["a", "b"]), - DataFrame(np.arange(10).reshape((5, 2))), - 
DataFrame(np.arange(25).reshape((5, 5))), - DataFrame(np.arange(25).reshape((5, 5)), columns=["a", "b", 99, "d", "d"]), - ] + [DataFrame(s) for s in create_series()] - - def is_constant(x): - values = x.values.ravel("K") - return len(set(values[notna(values)])) == 1 - - def no_nans(x): - return x.notna().all().all() - - # data is a tuple(object, is_constant, no_nans) - data = create_series() + create_dataframes() - - return [(x, is_constant(x), no_nans(x)) for x in data] - - -@pytest.fixture(params=_create_consistency_data()) -def consistency_data(request): - """Create consistency data""" - return request.param - - -@pytest.fixture -def frame(): - """Make mocked frame as fixture.""" - return DataFrame( - np.random.randn(100, 10), - index=bdate_range(datetime(2009, 1, 1), periods=100), - columns=np.arange(10), - ) - - @pytest.fixture def times_frame(): """Frame for testing times argument in EWM groupby.""" @@ -328,16 +159,6 @@ def times_frame(): ) -@pytest.fixture -def series(): - """Make mocked series as fixture.""" - arr = np.random.randn(100) - locs = np.arange(20, 40) - arr[locs] = np.NaN - series = Series(arr, index=bdate_range(datetime(2009, 1, 1), periods=100)) - return series - - @pytest.fixture(params=["1 day", timedelta(days=1)]) def halflife_with_times(request): """Halflife argument for EWM when times is specified.""" diff --git a/pandas/tests/window/moments/conftest.py b/pandas/tests/window/moments/conftest.py new file mode 100644 index 0000000000000..829df1f3bfe2f --- /dev/null +++ b/pandas/tests/window/moments/conftest.py @@ -0,0 +1,183 @@ +from datetime import datetime + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Series, + bdate_range, + notna, +) + + +@pytest.fixture +def series(): + """Make mocked series as fixture.""" + arr = np.random.randn(100) + locs = np.arange(20, 40) + arr[locs] = np.NaN + series = Series(arr, index=bdate_range(datetime(2009, 1, 1), periods=100)) + return series + + +@pytest.fixture +def 
frame(): + """Make mocked frame as fixture.""" + return DataFrame( + np.random.randn(100, 10), + index=bdate_range(datetime(2009, 1, 1), periods=100), + columns=np.arange(10), + ) + + +# create the data only once as we are not setting it +def _create_consistency_data(): + def create_series(): + return [ + Series(dtype=object), + Series([np.nan]), + Series([np.nan, np.nan]), + Series([3.0]), + Series([np.nan, 3.0]), + Series([3.0, np.nan]), + Series([1.0, 3.0]), + Series([2.0, 2.0]), + Series([3.0, 1.0]), + Series( + [5.0, 5.0, 5.0, 5.0, np.nan, np.nan, np.nan, 5.0, 5.0, np.nan, np.nan] + ), + Series( + [ + np.nan, + 5.0, + 5.0, + 5.0, + np.nan, + np.nan, + np.nan, + 5.0, + 5.0, + np.nan, + np.nan, + ] + ), + Series( + [ + np.nan, + np.nan, + 5.0, + 5.0, + np.nan, + np.nan, + np.nan, + 5.0, + 5.0, + np.nan, + np.nan, + ] + ), + Series( + [ + np.nan, + 3.0, + np.nan, + 3.0, + 4.0, + 5.0, + 6.0, + np.nan, + np.nan, + 7.0, + 12.0, + 13.0, + 14.0, + 15.0, + ] + ), + Series( + [ + np.nan, + 5.0, + np.nan, + 2.0, + 4.0, + 0.0, + 9.0, + np.nan, + np.nan, + 3.0, + 12.0, + 13.0, + 14.0, + 15.0, + ] + ), + Series( + [ + 2.0, + 3.0, + np.nan, + 3.0, + 4.0, + 5.0, + 6.0, + np.nan, + np.nan, + 7.0, + 12.0, + 13.0, + 14.0, + 15.0, + ] + ), + Series( + [ + 2.0, + 5.0, + np.nan, + 2.0, + 4.0, + 0.0, + 9.0, + np.nan, + np.nan, + 3.0, + 12.0, + 13.0, + 14.0, + 15.0, + ] + ), + Series(range(10)), + Series(range(20, 0, -2)), + ] + + def create_dataframes(): + return [ + DataFrame(), + DataFrame(columns=["a"]), + DataFrame(columns=["a", "a"]), + DataFrame(columns=["a", "b"]), + DataFrame(np.arange(10).reshape((5, 2))), + DataFrame(np.arange(25).reshape((5, 5))), + DataFrame(np.arange(25).reshape((5, 5)), columns=["a", "b", 99, "d", "d"]), + ] + [DataFrame(s) for s in create_series()] + + def is_constant(x): + values = x.values.ravel("K") + return len(set(values[notna(values)])) == 1 + + def no_nans(x): + return x.notna().all().all() + + # data is a tuple(object, is_constant, no_nans) + 
data = create_series() + create_dataframes() + + return [(x, is_constant(x), no_nans(x)) for x in data] + + +@pytest.fixture(params=_create_consistency_data()) +def consistency_data(request): + """Create consistency data""" + return request.param diff --git a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py index 300f3f5729614..e70d079739003 100644 --- a/pandas/tests/window/test_api.py +++ b/pandas/tests/window/test_api.py @@ -16,7 +16,8 @@ from pandas.core.base import SpecificationError -def test_getitem(frame): +def test_getitem(): + frame = DataFrame(np.random.randn(5, 5)) r = frame.rolling(window=5) tm.assert_index_equal(r._selected_obj.columns, frame.columns)
- [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them These fixtures were only specific to the tests in this directory.
https://api.github.com/repos/pandas-dev/pandas/pulls/41430
2021-05-12T03:17:28Z
2021-05-12T13:29:36Z
2021-05-12T13:29:36Z
2021-05-12T16:32:21Z
DEPR: setting Categorical._codes
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 7ec74b7045437..e5eb8ccf7cf65 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -644,6 +644,7 @@ Deprecations - Deprecated the ``level`` keyword for :class:`DataFrame` and :class:`Series` aggregations; use groupby instead (:issue:`39983`) - The ``inplace`` parameter of :meth:`Categorical.remove_categories`, :meth:`Categorical.add_categories`, :meth:`Categorical.reorder_categories`, :meth:`Categorical.rename_categories`, :meth:`Categorical.set_categories` is deprecated and will be removed in a future version (:issue:`37643`) - Deprecated :func:`merge` producing duplicated columns through the ``suffixes`` keyword and already existing columns (:issue:`22818`) +- Deprecated setting :attr:`Categorical._codes`, create a new :class:`Categorical` with the desired codes instead (:issue:`40606`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 26c582561cd3d..cb8a08f5668ac 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1861,6 +1861,12 @@ def _codes(self) -> np.ndarray: @_codes.setter def _codes(self, value: np.ndarray): + warn( + "Setting the codes on a Categorical is deprecated and will raise in " + "a future version. 
Create a new Categorical object instead", + FutureWarning, + stacklevel=2, + ) # GH#40606 NDArrayBacked.__init__(self, value, self.dtype) def _box_func(self, i: int): diff --git a/pandas/tests/arrays/categorical/test_api.py b/pandas/tests/arrays/categorical/test_api.py index a063491cd08fa..bde75051389ca 100644 --- a/pandas/tests/arrays/categorical/test_api.py +++ b/pandas/tests/arrays/categorical/test_api.py @@ -489,6 +489,15 @@ def test_set_categories_inplace(self): tm.assert_index_equal(cat.categories, Index(["a", "b", "c", "d"])) + def test_codes_setter_deprecated(self): + cat = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]) + new_codes = cat._codes + 1 + with tm.assert_produces_warning(FutureWarning): + # GH#40606 + cat._codes = new_codes + + assert cat._codes is new_codes + class TestPrivateCategoricalAPI: def test_codes_immutable(self):
- [x] closes #40606 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41429
2021-05-12T01:49:28Z
2021-05-12T13:52:59Z
2021-05-12T13:52:59Z
2021-05-12T13:58:28Z
REG: quantile with IntegerArray/FloatingArray
diff --git a/pandas/core/array_algos/quantile.py b/pandas/core/array_algos/quantile.py index efa36a5bd3ae9..32c50ed38eba0 100644 --- a/pandas/core/array_algos/quantile.py +++ b/pandas/core/array_algos/quantile.py @@ -37,7 +37,18 @@ def quantile_compat(values: ArrayLike, qs: np.ndarray, interpolation: str) -> Ar mask = isna(values) return _quantile_with_mask(values, mask, fill_value, qs, interpolation) else: - return _quantile_ea_compat(values, qs, interpolation) + # In general we don't want to import from arrays here; + # this is temporary pending discussion in GH#41428 + from pandas.core.arrays import BaseMaskedArray + + if isinstance(values, BaseMaskedArray): + # e.g. IntegerArray, does not implement _from_factorized + out = _quantile_ea_fallback(values, qs, interpolation) + + else: + out = _quantile_ea_compat(values, qs, interpolation) + + return out def _quantile_with_mask( @@ -144,3 +155,31 @@ def _quantile_ea_compat( # error: Incompatible return value type (got "ndarray", expected "ExtensionArray") return result # type: ignore[return-value] + + +def _quantile_ea_fallback( + values: ExtensionArray, qs: np.ndarray, interpolation: str +) -> ExtensionArray: + """ + quantile compatibility for ExtensionArray subclasses that do not + implement `_from_factorized`, e.g. IntegerArray. + + Notes + ----- + We assume that all impacted cases are 1D-only. 
+ """ + mask = np.atleast_2d(np.asarray(values.isna())) + npvalues = np.atleast_2d(np.asarray(values)) + + res = _quantile_with_mask( + npvalues, + mask=mask, + fill_value=values.dtype.na_value, + qs=qs, + interpolation=interpolation, + ) + assert res.ndim == 2 + assert res.shape[0] == 1 + res = res[0] + out = type(values)._from_sequence(res, dtype=values.dtype) + return out diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index bd4dfdb4ebad0..e051e765b2ba3 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1316,7 +1316,6 @@ def quantile( assert is_list_like(qs) # caller is responsible for this result = quantile_compat(self.values, np.asarray(qs._values), interpolation) - return new_block(result, placement=self._mgr_locs, ndim=2) diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py index dbb5cb357de47..7926ec52b1f28 100644 --- a/pandas/tests/frame/methods/test_quantile.py +++ b/pandas/tests/frame/methods/test_quantile.py @@ -548,22 +548,28 @@ class TestQuantileExtensionDtype: ), pd.period_range("2016-01-01", periods=9, freq="D"), pd.date_range("2016-01-01", periods=9, tz="US/Pacific"), - pytest.param( - pd.array(np.arange(9), dtype="Int64"), - marks=pytest.mark.xfail(reason="doesn't implement from_factorized"), - ), - pytest.param( - pd.array(np.arange(9), dtype="Float64"), - marks=pytest.mark.xfail(reason="doesn't implement from_factorized"), - ), + pd.array(np.arange(9), dtype="Int64"), + pd.array(np.arange(9), dtype="Float64"), ], ids=lambda x: str(x.dtype), ) def index(self, request): + # NB: not actually an Index object idx = request.param idx.name = "A" return idx + @pytest.fixture + def obj(self, index, frame_or_series): + # bc index is not always an Index (yet), we need to re-patch .name + obj = frame_or_series(index).copy() + + if frame_or_series is Series: + obj.name = "A" + else: + obj.columns = ["A"] + return obj + def 
compute_quantile(self, obj, qs): if isinstance(obj, Series): result = obj.quantile(qs) @@ -571,8 +577,7 @@ def compute_quantile(self, obj, qs): result = obj.quantile(qs, numeric_only=False) return result - def test_quantile_ea(self, index, frame_or_series): - obj = frame_or_series(index).copy() + def test_quantile_ea(self, obj, index): # result should be invariant to shuffling indexer = np.arange(len(index), dtype=np.intp) @@ -583,13 +588,14 @@ def test_quantile_ea(self, index, frame_or_series): result = self.compute_quantile(obj, qs) # expected here assumes len(index) == 9 - expected = Series([index[4], index[0], index[-1]], index=qs, name="A") - expected = frame_or_series(expected) + expected = Series( + [index[4], index[0], index[-1]], dtype=index.dtype, index=qs, name="A" + ) + expected = type(obj)(expected) tm.assert_equal(result, expected) - def test_quantile_ea_with_na(self, index, frame_or_series): - obj = frame_or_series(index).copy() + def test_quantile_ea_with_na(self, obj, index): obj.iloc[0] = index._na_value obj.iloc[-1] = index._na_value @@ -603,15 +609,15 @@ def test_quantile_ea_with_na(self, index, frame_or_series): result = self.compute_quantile(obj, qs) # expected here assumes len(index) == 9 - expected = Series([index[4], index[1], index[-2]], index=qs, name="A") - expected = frame_or_series(expected) + expected = Series( + [index[4], index[1], index[-2]], dtype=index.dtype, index=qs, name="A" + ) + expected = type(obj)(expected) tm.assert_equal(result, expected) # TODO: filtering can be removed after GH#39763 is fixed @pytest.mark.filterwarnings("ignore:Using .astype to convert:FutureWarning") - def test_quantile_ea_all_na(self, index, frame_or_series): - - obj = frame_or_series(index).copy() + def test_quantile_ea_all_na(self, obj, index, frame_or_series): obj.iloc[:] = index._na_value @@ -628,13 +634,12 @@ def test_quantile_ea_all_na(self, index, frame_or_series): result = self.compute_quantile(obj, qs) expected = index.take([-1, -1, -1], 
allow_fill=True, fill_value=index._na_value) - expected = Series(expected, index=qs) - expected = frame_or_series(expected) + expected = Series(expected, index=qs, name="A") + expected = type(obj)(expected) tm.assert_equal(result, expected) - def test_quantile_ea_scalar(self, index, frame_or_series): + def test_quantile_ea_scalar(self, obj, index): # scalar qs - obj = frame_or_series(index).copy() # result should be invariant to shuffling indexer = np.arange(len(index), dtype=np.intp) @@ -644,8 +649,8 @@ def test_quantile_ea_scalar(self, index, frame_or_series): qs = 0.5 result = self.compute_quantile(obj, qs) - expected = Series({"A": index[4]}, name=0.5) - if frame_or_series is Series: + expected = Series({"A": index[4]}, dtype=index.dtype, name=0.5) + if isinstance(obj, Series): expected = expected["A"] assert result == expected else:
- [x] closes #39771 - [x] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry Not present in any released version, so no release note needed as long as before 1.3
https://api.github.com/repos/pandas-dev/pandas/pulls/41428
2021-05-11T22:46:46Z
2021-05-31T17:41:02Z
2021-05-31T17:41:02Z
2021-05-31T18:57:57Z
BUG: DataFrameGroupBy.__getitem__ with non-unique columns
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 5adc8540e6864..14f33cc2c8535 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -895,6 +895,8 @@ Groupby/resample/rolling - Bug in :meth:`SeriesGroupBy.agg` failing to retain ordered :class:`CategoricalDtype` on order-preserving aggregations (:issue:`41147`) - Bug in :meth:`DataFrameGroupBy.min` and :meth:`DataFrameGroupBy.max` with multiple object-dtype columns and ``numeric_only=False`` incorrectly raising ``ValueError`` (:issue:41111`) - Bug in :meth:`DataFrameGroupBy.rank` with the GroupBy object's ``axis=0`` and the ``rank`` method's keyword ``axis=1`` (:issue:`41320`) +- Bug in :meth:`DataFrameGroupBy.__getitem__` with non-unique columns incorrectly returning a malformed :class:`SeriesGroupBy` instead of :class:`DataFrameGroupBy` (:issue:`41427`) +- Bug in :meth:`DataFrameGroupBy.transform` with non-unique columns incorrectly raising ``AttributeError`` (:issue:`41427`) Reshaping ^^^^^^^^^ diff --git a/pandas/core/base.py b/pandas/core/base.py index e45c4bf514973..28f19a6beeec0 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -214,7 +214,7 @@ def ndim(self) -> int: @cache_readonly def _obj_with_exclusions(self): if self._selection is not None and isinstance(self.obj, ABCDataFrame): - return self.obj.reindex(columns=self._selection_list) + return self.obj[self._selection_list] if len(self.exclusions) > 0: return self.obj.drop(self.exclusions, axis=1) @@ -239,7 +239,9 @@ def __getitem__(self, key): else: if key not in self.obj: raise KeyError(f"Column not found: {key}") - return self._gotitem(key, ndim=1) + subset = self.obj[key] + ndim = subset.ndim + return self._gotitem(key, ndim=ndim, subset=subset) def _gotitem(self, key, ndim: int, subset=None): """ diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index c5d9144893f48..2997deb41c78b 100644 --- a/pandas/core/groupby/generic.py +++ 
b/pandas/core/groupby/generic.py @@ -1417,12 +1417,19 @@ def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFram return path, res def _transform_item_by_item(self, obj: DataFrame, wrapper) -> DataFrame: - # iterate through columns + # iterate through columns, see test_transform_exclude_nuisance output = {} inds = [] for i, col in enumerate(obj): + subset = obj.iloc[:, i] + sgb = SeriesGroupBy( + subset, + selection=col, + grouper=self.grouper, + exclusions=self.exclusions, + ) try: - output[col] = self[col].transform(wrapper) + output[i] = sgb.transform(wrapper) except TypeError: # e.g. trying to call nanmean with string values pass @@ -1434,7 +1441,9 @@ def _transform_item_by_item(self, obj: DataFrame, wrapper) -> DataFrame: columns = obj.columns.take(inds) - return self.obj._constructor(output, index=obj.index, columns=columns) + result = self.obj._constructor(output, index=obj.index) + result.columns = columns + return result def filter(self, func, dropna=True, *args, **kwargs): """ @@ -1504,7 +1513,7 @@ def filter(self, func, dropna=True, *args, **kwargs): return self._apply_filter(indices, dropna) - def __getitem__(self, key): + def __getitem__(self, key) -> DataFrameGroupBy | SeriesGroupBy: if self.axis == 1: # GH 37725 raise ValueError("Cannot subset columns when using axis=1") diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index b22e4749bfdfc..09317cbeec658 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -20,6 +20,10 @@ date_range, ) import pandas._testing as tm +from pandas.core.groupby.generic import ( + DataFrameGroupBy, + SeriesGroupBy, +) from pandas.core.groupby.groupby import DataError @@ -391,13 +395,31 @@ def test_transform_select_columns(df): tm.assert_frame_equal(result, expected) -def test_transform_exclude_nuisance(df): +@pytest.mark.parametrize("duplicates", [True, False]) +def 
test_transform_exclude_nuisance(df, duplicates): + # case that goes through _transform_item_by_item + + if duplicates: + # make sure we work with duplicate columns GH#41427 + df.columns = ["A", "C", "C", "D"] # this also tests orderings in transform between # series/frame to make sure it's consistent expected = {} grouped = df.groupby("A") - expected["C"] = grouped["C"].transform(np.mean) + + gbc = grouped["C"] + expected["C"] = gbc.transform(np.mean) + if duplicates: + # squeeze 1-column DataFrame down to Series + expected["C"] = expected["C"]["C"] + + assert isinstance(gbc.obj, DataFrame) + assert isinstance(gbc, DataFrameGroupBy) + else: + assert isinstance(gbc, SeriesGroupBy) + assert isinstance(gbc.obj, Series) + expected["D"] = grouped["D"].transform(np.mean) expected = DataFrame(expected) result = df.groupby("A").transform(np.mean)
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41427
2021-05-11T20:27:50Z
2021-05-12T01:10:28Z
2021-05-12T01:10:28Z
2021-05-12T01:36:48Z
REF: remove name arg from Grouping
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 598750475f3e8..4aac2630feb2c 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -446,15 +446,14 @@ def __init__( index: Index, grouper=None, obj: FrameOrSeries | None = None, - name: Hashable = None, level=None, sort: bool = True, observed: bool = False, in_axis: bool = False, dropna: bool = True, ): - self.name = name self.level = level + self._orig_grouper = grouper self.grouper = _convert_grouper(index, grouper) self.all_grouper = None self.index = index @@ -466,18 +465,11 @@ def __init__( self._passed_categorical = False - # right place for this? - if isinstance(grouper, (Series, Index)) and name is None: - self.name = grouper.name - # we have a single grouper which may be a myriad of things, # some of which are dependent on the passing in level ilevel = self._ilevel if ilevel is not None: - if self.name is None: - self.name = index.names[ilevel] - ( self.grouper, # Index self._codes, @@ -491,16 +483,22 @@ def __init__( # what key/level refer to exactly, don't need to # check again as we have by this point converted these # to an actual value (rather than a pd.Grouper) - _, grouper, _ = self.grouper._get_grouper( + _, newgrouper, newobj = self.grouper._get_grouper( # error: Value of type variable "FrameOrSeries" of "_get_grouper" # of "Grouper" cannot be "Optional[FrameOrSeries]" self.obj, # type: ignore[type-var] validate=False, ) - if self.name is None: - self.name = grouper.result_index.name - self.obj = self.grouper.obj - self.grouper = grouper._get_grouper() + self.obj = newobj + + ng = newgrouper._get_grouper() + if isinstance(newgrouper, ops.BinGrouper): + # in this case we have `ng is newgrouper` + self.grouper = ng + else: + # ops.BaseGrouper + # use Index instead of ndarray so we can recover the name + self.grouper = Index(ng, name=newgrouper.result_index.name) else: # a passed Categorical @@ -511,10 +509,6 @@ def __init__( self.grouper, 
self.sort, observed ) - # we are done - elif isinstance(self.grouper, Grouping): - self.grouper = self.grouper.grouper - # no level passed elif not isinstance( self.grouper, (Series, Index, ExtensionArray, np.ndarray) @@ -546,6 +540,23 @@ def __repr__(self) -> str: def __iter__(self): return iter(self.indices) + @cache_readonly + def name(self) -> Hashable: + ilevel = self._ilevel + if ilevel is not None: + return self.index.names[ilevel] + + if isinstance(self._orig_grouper, (Index, Series)): + return self._orig_grouper.name + + elif isinstance(self.grouper, ops.BaseGrouper): + return self.grouper.result_index.name + + elif isinstance(self.grouper, Index): + return self.grouper.name + + return None + @cache_readonly def _ilevel(self) -> int | None: """ @@ -814,25 +825,29 @@ def is_in_obj(gpr) -> bool: for gpr, level in zip(keys, levels): if is_in_obj(gpr): # df.groupby(df['name']) - in_axis, name = True, gpr.name - exclusions.add(name) + in_axis = True + exclusions.add(gpr.name) elif is_in_axis(gpr): # df.groupby('name') if gpr in obj: if validate: obj._check_label_or_level_ambiguity(gpr, axis=axis) in_axis, name, gpr = True, gpr, obj[gpr] + if gpr.ndim != 1: + # non-unique columns; raise here to get the name in the + # exception message + raise ValueError(f"Grouper for '{name}' not 1-dimensional") exclusions.add(name) elif obj._is_level_reference(gpr, axis=axis): - in_axis, name, level, gpr = False, None, gpr, None + in_axis, level, gpr = False, gpr, None else: raise KeyError(gpr) elif isinstance(gpr, Grouper) and gpr.key is not None: # Add key to exclusions exclusions.add(gpr.key) - in_axis, name = False, None + in_axis = False else: - in_axis, name = False, None + in_axis = False if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]: raise ValueError( @@ -847,7 +862,6 @@ def is_in_obj(gpr) -> bool: group_axis, gpr, obj=obj, - name=name, level=level, sort=sort, observed=observed, diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 
3045451974ee7..4226158f53ffa 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -1200,7 +1200,7 @@ def names(self) -> list[Hashable]: @property def groupings(self) -> list[grouper.Grouping]: lev = self.binlabels - ping = grouper.Grouping(lev, lev, in_axis=False, level=None, name=lev.name) + ping = grouper.Grouping(lev, lev, in_axis=False, level=None) return [ping] def _aggregate_series_fast(self, obj: Series, func: F) -> np.ndarray:
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41426
2021-05-11T20:22:52Z
2021-05-17T18:05:47Z
2021-05-17T18:05:47Z
2021-05-17T18:06:45Z
BUG: always strip .freq when putting DTI/TDI into Series/DataFrame
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 9968a103a13bf..c2ff7361ff48b 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -972,6 +972,7 @@ Other - Bug in :func:`pandas.util.show_versions` where console JSON output was not proper JSON (:issue:`39701`) - Bug in :meth:`DataFrame.convert_dtypes` incorrectly raised ValueError when called on an empty DataFrame (:issue:`40393`) - Bug in :meth:`DataFrame.clip` not interpreting missing values as no threshold (:issue:`40420`) +- Bug in :class:`Series` backed by :class:`DatetimeArray` or :class:`TimedeltaArray` sometimes failing to set the array's ``freq`` to ``None`` (:issue:`41425`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/base.py b/pandas/core/base.py index 7a48b1fdfda1e..e2720fcbc7ec4 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -498,8 +498,8 @@ def to_numpy( >>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> ser.to_numpy(dtype=object) - array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'), - Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')], + array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), + Timestamp('2000-01-02 00:00:00+0100', tz='CET')], dtype=object) Or ``dtype='datetime64[ns]'`` to return an ndarray of native diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 0541b76b377f7..31e32b053367b 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -85,6 +85,7 @@ from pandas.core.internals.blocks import ( ensure_block_shape, external_values, + maybe_coerce_values, new_block, to_native_types, ) @@ -701,7 +702,7 @@ def __init__( if verify_integrity: self._axes = [ensure_index(ax) for ax in axes] - self.arrays = [ensure_wrapped_if_datetimelike(arr) for arr in arrays] + self.arrays = [maybe_coerce_values(arr) for arr in arrays] 
self._verify_integrity() def _verify_integrity(self) -> None: @@ -814,7 +815,7 @@ def iset(self, loc: int | slice | np.ndarray, value: ArrayLike): # TODO we receive a datetime/timedelta64 ndarray from DataFrame._iset_item # but we should avoid that and pass directly the proper array - value = ensure_wrapped_if_datetimelike(value) + value = maybe_coerce_values(value) assert isinstance(value, (np.ndarray, ExtensionArray)) assert value.ndim == 1 @@ -873,7 +874,7 @@ def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None: raise ValueError( f"Expected a 1D array, got an array with shape {value.shape}" ) - value = ensure_wrapped_if_datetimelike(value) + value = maybe_coerce_values(value) # TODO self.arrays can be empty # assert len(value) == len(self.arrays[0]) @@ -1188,7 +1189,7 @@ def __init__( assert len(arrays) == 1 self._axes = [ensure_index(ax) for ax in self._axes] arr = arrays[0] - arr = ensure_wrapped_if_datetimelike(arr) + arr = maybe_coerce_values(arr) if isinstance(arr, ABCPandasArray): arr = arr.to_numpy() self.arrays = [arr] diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index bd4dfdb4ebad0..4f1b16e747394 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1860,6 +1860,10 @@ def maybe_coerce_values(values) -> ArrayLike: if issubclass(values.dtype.type, str): values = np.array(values, dtype=object) + if isinstance(values, (DatetimeArray, TimedeltaArray)) and values.freq is not None: + # freq is only stored in DatetimeIndex/TimedeltaIndex, not in Series/DataFrame + values = values._with_freq(None) + return values diff --git a/pandas/core/series.py b/pandas/core/series.py index c8e9898f9462a..d0ff50cca5355 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -813,8 +813,8 @@ def __array__(self, dtype: NpDtype | None = None) -> np.ndarray: >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> np.asarray(tzser, dtype="object") - 
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'), - Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')], + array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), + Timestamp('2000-01-02 00:00:00+0100', tz='CET')], dtype=object) Or the values may be localized to UTC and the tzinfo discarded with diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py index 33589027c0d0f..bb8347f0a0122 100644 --- a/pandas/tests/extension/test_datetime.py +++ b/pandas/tests/extension/test_datetime.py @@ -97,7 +97,10 @@ class TestDatetimeDtype(BaseDatetimeTests, base.BaseDtypeTests): class TestConstructors(BaseDatetimeTests, base.BaseConstructorsTests): - pass + def test_series_constructor(self, data): + # Series construction drops any .freq attr + data = data._with_freq(None) + super().test_series_constructor(data) class TestGetitem(BaseDatetimeTests, base.BaseGetitemTests): diff --git a/pandas/tests/frame/methods/test_set_index.py b/pandas/tests/frame/methods/test_set_index.py index 62dc400f8de9f..51f66128b1500 100644 --- a/pandas/tests/frame/methods/test_set_index.py +++ b/pandas/tests/frame/methods/test_set_index.py @@ -96,7 +96,7 @@ def test_set_index_cast_datetimeindex(self): idf = df.set_index("A") assert isinstance(idf.index, DatetimeIndex) - def test_set_index_dst(self, using_array_manager): + def test_set_index_dst(self): di = date_range("2006-10-29 00:00:00", periods=3, freq="H", tz="US/Pacific") df = DataFrame(data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=di).reset_index() @@ -106,8 +106,7 @@ def test_set_index_dst(self, using_array_manager): data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=Index(di, name="index"), ) - if not using_array_manager: - exp.index = exp.index._with_freq(None) + exp.index = exp.index._with_freq(None) tm.assert_frame_equal(res, exp) # GH#12920 diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index 28465e3a979a7..4846e15da039f 100644 --- 
a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -586,7 +586,7 @@ def test_rolling_datetime(axis_frame, tz_naive_fixture): ), ], ) -def test_rolling_window_as_string(center, expected_data, using_array_manager): +def test_rolling_window_as_string(center, expected_data): # see gh-22590 date_today = datetime.now() days = date_range(date_today, date_today + timedelta(365), freq="D") @@ -602,9 +602,7 @@ def test_rolling_window_as_string(center, expected_data, using_array_manager): ].agg("max") index = days.rename("DateCol") - if not using_array_manager: - # INFO(ArrayManager) preserves the frequence of the index - index = index._with_freq(None) + index = index._with_freq(None) expected = Series(expected_data, index=index, name="metric") tm.assert_series_equal(result, expected)
ATM when we put a DatetimeArray/TimedeltaArray/DatetimeIndex/TimedeltaIndex (from here on, just "DTI") into a Series or DataFrame, we drop its .freq for BlockManager-DataFrame cases, but not the other three cases (BlockManager-Series, ArrayManager-DataFrame, ArrayManager-Series). The long-term behavior is definitely going to always drop the freq (more specifically, DTA/TDA won't _have_ freq, xref #31218). So this PR standardizes always-dropping. cc @mroeschke this should unblock your window PR cc @jorisvandenbossche bc this touches ArrayManager
https://api.github.com/repos/pandas-dev/pandas/pulls/41425
2021-05-11T20:14:32Z
2021-05-17T16:31:51Z
2021-05-17T16:31:51Z
2021-05-17T16:35:51Z
CLN: Grouping.__init__ logic belonging in _convert_grouper
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 4650dbea27de1..1b5c11b363457 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -16,12 +16,11 @@ from pandas.errors import InvalidIndexError from pandas.util._decorators import cache_readonly +from pandas.core.dtypes.cast import sanitize_to_nanoseconds from pandas.core.dtypes.common import ( is_categorical_dtype, - is_datetime64_dtype, is_list_like, is_scalar, - is_timedelta64_dtype, ) import pandas.core.algorithms as algorithms @@ -466,9 +465,6 @@ def __init__( if isinstance(grouper, (Series, Index)) and name is None: self.name = grouper.name - if isinstance(grouper, MultiIndex): - self.grouper = grouper._values - # we have a single grouper which may be a myriad of things, # some of which are dependent on the passing in level @@ -506,14 +502,9 @@ def __init__( self.grouper = grouper._get_grouper() else: - if self.grouper is None and self.name is not None and self.obj is not None: - self.grouper = self.obj[self.name] - - elif isinstance(self.grouper, (list, tuple)): - self.grouper = com.asarray_tuplesafe(self.grouper) # a passed Categorical - elif is_categorical_dtype(self.grouper): + if is_categorical_dtype(self.grouper): self.grouper, self.all_grouper = recode_for_groupby( self.grouper, self.sort, observed @@ -539,7 +530,7 @@ def __init__( ) # we are done - if isinstance(self.grouper, Grouping): + elif isinstance(self.grouper, Grouping): self.grouper = self.grouper.grouper # no level passed @@ -562,14 +553,10 @@ def __init__( self.grouper = None # Try for sanity raise AssertionError(errmsg) - # if we have a date/time-like grouper, make sure that we have - # Timestamps like - if getattr(self.grouper, "dtype", None) is not None: - if is_datetime64_dtype(self.grouper): - self.grouper = self.grouper.astype("datetime64[ns]") - elif is_timedelta64_dtype(self.grouper): - - self.grouper = self.grouper.astype("timedelta64[ns]") + if isinstance(self.grouper, 
np.ndarray): + # if we have a date/time-like grouper, make sure that we have + # Timestamps like + self.grouper = sanitize_to_nanoseconds(self.grouper) def __repr__(self) -> str: return f"Grouping({self.name})" @@ -876,9 +863,14 @@ def _convert_grouper(axis: Index, grouper): return grouper._values else: return grouper.reindex(axis)._values - elif isinstance(grouper, (list, Series, Index, np.ndarray)): + elif isinstance(grouper, MultiIndex): + return grouper._values + elif isinstance(grouper, (list, tuple, Series, Index, np.ndarray)): if len(grouper) != len(axis): raise ValueError("Grouper and axis must be same length") + + if isinstance(grouper, (list, tuple)): + grouper = com.asarray_tuplesafe(grouper) return grouper else: return grouper
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41424
2021-05-11T19:59:13Z
2021-05-12T01:08:32Z
2021-05-12T01:08:32Z
2021-05-12T01:36:06Z
[ArrowStringArray] TST: parametrize tests/strings/test_case_justify.py
diff --git a/pandas/tests/strings/test_case_justify.py b/pandas/tests/strings/test_case_justify.py index b46f50e430b54..d6e2ca7399b4e 100644 --- a/pandas/tests/strings/test_case_justify.py +++ b/pandas/tests/strings/test_case_justify.py @@ -1,4 +1,5 @@ from datetime import datetime +import operator import numpy as np import pytest @@ -9,68 +10,80 @@ ) -def test_title(): - values = Series(["FOO", "BAR", np.nan, "Blah", "blurg"]) +def test_title(any_string_dtype): + s = Series(["FOO", "BAR", np.nan, "Blah", "blurg"], dtype=any_string_dtype) + result = s.str.title() + expected = Series(["Foo", "Bar", np.nan, "Blah", "Blurg"], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) - result = values.str.title() - exp = Series(["Foo", "Bar", np.nan, "Blah", "Blurg"]) - tm.assert_series_equal(result, exp) - # mixed - mixed = Series(["FOO", np.nan, "bar", True, datetime.today(), "blah", None, 1, 2.0]) - mixed = mixed.str.title() - exp = Series(["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", np.nan, np.nan, np.nan]) - tm.assert_almost_equal(mixed, exp) +def test_title_mixed_object(): + s = Series(["FOO", np.nan, "bar", True, datetime.today(), "blah", None, 1, 2.0]) + result = s.str.title() + expected = Series( + ["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", np.nan, np.nan, np.nan] + ) + tm.assert_almost_equal(result, expected) -def test_lower_upper(): - values = Series(["om", np.nan, "nom", "nom"]) +def test_lower_upper(any_string_dtype): + s = Series(["om", np.nan, "nom", "nom"], dtype=any_string_dtype) - result = values.str.upper() - exp = Series(["OM", np.nan, "NOM", "NOM"]) - tm.assert_series_equal(result, exp) + result = s.str.upper() + expected = Series(["OM", np.nan, "NOM", "NOM"], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) result = result.str.lower() - tm.assert_series_equal(result, values) + tm.assert_series_equal(result, s) + - # mixed - mixed = Series(["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0]) - mixed = 
mixed.str.upper() - rs = Series(mixed).str.lower() - xp = Series(["a", np.nan, "b", np.nan, np.nan, "foo", np.nan, np.nan, np.nan]) - assert isinstance(rs, Series) - tm.assert_series_equal(rs, xp) +def test_lower_upper_mixed_object(): + s = Series(["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0]) + result = s.str.upper() + expected = Series(["A", np.nan, "B", np.nan, np.nan, "FOO", np.nan, np.nan, np.nan]) + tm.assert_series_equal(result, expected) -def test_capitalize(): - values = Series(["FOO", "BAR", np.nan, "Blah", "blurg"]) - result = values.str.capitalize() - exp = Series(["Foo", "Bar", np.nan, "Blah", "Blurg"]) - tm.assert_series_equal(result, exp) + result = s.str.lower() + expected = Series(["a", np.nan, "b", np.nan, np.nan, "foo", np.nan, np.nan, np.nan]) + tm.assert_series_equal(result, expected) - # mixed - mixed = Series(["FOO", np.nan, "bar", True, datetime.today(), "blah", None, 1, 2.0]) - mixed = mixed.str.capitalize() - exp = Series(["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", np.nan, np.nan, np.nan]) - tm.assert_almost_equal(mixed, exp) +def test_capitalize(any_string_dtype): + s = Series(["FOO", "BAR", np.nan, "Blah", "blurg"], dtype=any_string_dtype) + result = s.str.capitalize() + expected = Series(["Foo", "Bar", np.nan, "Blah", "Blurg"], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) -def test_swapcase(): - values = Series(["FOO", "BAR", np.nan, "Blah", "blurg"]) - result = values.str.swapcase() - exp = Series(["foo", "bar", np.nan, "bLAH", "BLURG"]) - tm.assert_series_equal(result, exp) - # mixed - mixed = Series(["FOO", np.nan, "bar", True, datetime.today(), "Blah", None, 1, 2.0]) - mixed = mixed.str.swapcase() - exp = Series(["foo", np.nan, "BAR", np.nan, np.nan, "bLAH", np.nan, np.nan, np.nan]) - tm.assert_almost_equal(mixed, exp) +def test_capitalize_mixed_object(): + s = Series(["FOO", np.nan, "bar", True, datetime.today(), "blah", None, 1, 2.0]) + result = s.str.capitalize() + expected = Series( + 
["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", np.nan, np.nan, np.nan] + ) + tm.assert_series_equal(result, expected) + + +def test_swapcase(any_string_dtype): + s = Series(["FOO", "BAR", np.nan, "Blah", "blurg"], dtype=any_string_dtype) + result = s.str.swapcase() + expected = Series(["foo", "bar", np.nan, "bLAH", "BLURG"], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + +def test_swapcase_mixed_object(): + s = Series(["FOO", np.nan, "bar", True, datetime.today(), "Blah", None, 1, 2.0]) + result = s.str.swapcase() + expected = Series( + ["foo", np.nan, "BAR", np.nan, np.nan, "bLAH", np.nan, np.nan, np.nan] + ) + tm.assert_series_equal(result, expected) -def test_casemethods(): +def test_casemethods(any_string_dtype): values = ["aaa", "bbb", "CCC", "Dddd", "eEEE"] - s = Series(values) + s = Series(values, dtype=any_string_dtype) assert s.str.lower().tolist() == [v.lower() for v in values] assert s.str.upper().tolist() == [v.upper() for v in values] assert s.str.title().tolist() == [v.title() for v in values] @@ -78,108 +91,122 @@ def test_casemethods(): assert s.str.swapcase().tolist() == [v.swapcase() for v in values] -def test_pad(): - values = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"]) +def test_pad(any_string_dtype): + s = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"], dtype=any_string_dtype) + + result = s.str.pad(5, side="left") + expected = Series( + [" a", " b", np.nan, " c", np.nan, "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) - result = values.str.pad(5, side="left") - exp = Series([" a", " b", np.nan, " c", np.nan, "eeeeee"]) - tm.assert_almost_equal(result, exp) + result = s.str.pad(5, side="right") + expected = Series( + ["a ", "b ", np.nan, "c ", np.nan, "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) - result = values.str.pad(5, side="right") - exp = Series(["a ", "b ", np.nan, "c ", np.nan, "eeeeee"]) - tm.assert_almost_equal(result, exp) + result = 
s.str.pad(5, side="both") + expected = Series( + [" a ", " b ", np.nan, " c ", np.nan, "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) - result = values.str.pad(5, side="both") - exp = Series([" a ", " b ", np.nan, " c ", np.nan, "eeeeee"]) - tm.assert_almost_equal(result, exp) - # mixed - mixed = Series(["a", np.nan, "b", True, datetime.today(), "ee", None, 1, 2.0]) +def test_pad_mixed_object(): + s = Series(["a", np.nan, "b", True, datetime.today(), "ee", None, 1, 2.0]) - rs = Series(mixed).str.pad(5, side="left") - xp = Series( + result = s.str.pad(5, side="left") + expected = Series( [" a", np.nan, " b", np.nan, np.nan, " ee", np.nan, np.nan, np.nan] ) + tm.assert_series_equal(result, expected) - assert isinstance(rs, Series) - tm.assert_almost_equal(rs, xp) - - mixed = Series(["a", np.nan, "b", True, datetime.today(), "ee", None, 1, 2.0]) - - rs = Series(mixed).str.pad(5, side="right") - xp = Series( + result = s.str.pad(5, side="right") + expected = Series( ["a ", np.nan, "b ", np.nan, np.nan, "ee ", np.nan, np.nan, np.nan] ) + tm.assert_series_equal(result, expected) - assert isinstance(rs, Series) - tm.assert_almost_equal(rs, xp) - - mixed = Series(["a", np.nan, "b", True, datetime.today(), "ee", None, 1, 2.0]) - - rs = Series(mixed).str.pad(5, side="both") - xp = Series( + result = s.str.pad(5, side="both") + expected = Series( [" a ", np.nan, " b ", np.nan, np.nan, " ee ", np.nan, np.nan, np.nan] ) + tm.assert_series_equal(result, expected) - assert isinstance(rs, Series) - tm.assert_almost_equal(rs, xp) +def test_pad_fillchar(any_string_dtype): + s = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"], dtype=any_string_dtype) -def test_pad_fillchar(): + result = s.str.pad(5, side="left", fillchar="X") + expected = Series( + ["XXXXa", "XXXXb", np.nan, "XXXXc", np.nan, "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) - values = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"]) + result = 
s.str.pad(5, side="right", fillchar="X") + expected = Series( + ["aXXXX", "bXXXX", np.nan, "cXXXX", np.nan, "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) - result = values.str.pad(5, side="left", fillchar="X") - exp = Series(["XXXXa", "XXXXb", np.nan, "XXXXc", np.nan, "eeeeee"]) - tm.assert_almost_equal(result, exp) + result = s.str.pad(5, side="both", fillchar="X") + expected = Series( + ["XXaXX", "XXbXX", np.nan, "XXcXX", np.nan, "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) - result = values.str.pad(5, side="right", fillchar="X") - exp = Series(["aXXXX", "bXXXX", np.nan, "cXXXX", np.nan, "eeeeee"]) - tm.assert_almost_equal(result, exp) - result = values.str.pad(5, side="both", fillchar="X") - exp = Series(["XXaXX", "XXbXX", np.nan, "XXcXX", np.nan, "eeeeee"]) - tm.assert_almost_equal(result, exp) +def test_pad_fillchar_bad_arg_raises(any_string_dtype): + s = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"], dtype=any_string_dtype) msg = "fillchar must be a character, not str" with pytest.raises(TypeError, match=msg): - result = values.str.pad(5, fillchar="XY") + s.str.pad(5, fillchar="XY") msg = "fillchar must be a character, not int" with pytest.raises(TypeError, match=msg): - result = values.str.pad(5, fillchar=5) + s.str.pad(5, fillchar=5) -@pytest.mark.parametrize("f", ["center", "ljust", "rjust", "zfill", "pad"]) -def test_pad_width(f): +@pytest.mark.parametrize("method_name", ["center", "ljust", "rjust", "zfill", "pad"]) +def test_pad_width_bad_arg_raises(method_name, any_string_dtype): # see gh-13598 - s = Series(["1", "22", "a", "bb"]) - msg = "width must be of integer type, not*" + s = Series(["1", "22", "a", "bb"], dtype=any_string_dtype) + op = operator.methodcaller(method_name, "f") + msg = "width must be of integer type, not str" with pytest.raises(TypeError, match=msg): - getattr(s.str, f)("f") + op(s.str) -def test_center_ljust_rjust(): - values = Series(["a", "b", np.nan, "c", 
np.nan, "eeeeee"]) +def test_center_ljust_rjust(any_string_dtype): + s = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"], dtype=any_string_dtype) - result = values.str.center(5) - exp = Series([" a ", " b ", np.nan, " c ", np.nan, "eeeeee"]) - tm.assert_almost_equal(result, exp) + result = s.str.center(5) + expected = Series( + [" a ", " b ", np.nan, " c ", np.nan, "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) - result = values.str.ljust(5) - exp = Series(["a ", "b ", np.nan, "c ", np.nan, "eeeeee"]) - tm.assert_almost_equal(result, exp) + result = s.str.ljust(5) + expected = Series( + ["a ", "b ", np.nan, "c ", np.nan, "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) - result = values.str.rjust(5) - exp = Series([" a", " b", np.nan, " c", np.nan, "eeeeee"]) - tm.assert_almost_equal(result, exp) + result = s.str.rjust(5) + expected = Series( + [" a", " b", np.nan, " c", np.nan, "eeeeee"], dtype=any_string_dtype + ) + tm.assert_series_equal(result, expected) - # mixed - mixed = Series(["a", np.nan, "b", True, datetime.today(), "c", "eee", None, 1, 2.0]) - rs = Series(mixed).str.center(5) - xp = Series( +def test_center_ljust_rjust_mixed_object(): + s = Series(["a", np.nan, "b", True, datetime.today(), "c", "eee", None, 1, 2.0]) + + result = s.str.center(5) + expected = Series( [ " a ", np.nan, @@ -193,11 +220,10 @@ def test_center_ljust_rjust(): np.nan, ] ) - assert isinstance(rs, Series) - tm.assert_almost_equal(rs, xp) + tm.assert_series_equal(result, expected) - rs = Series(mixed).str.ljust(5) - xp = Series( + result = s.str.ljust(5) + expected = Series( [ "a ", np.nan, @@ -211,11 +237,10 @@ def test_center_ljust_rjust(): np.nan, ] ) - assert isinstance(rs, Series) - tm.assert_almost_equal(rs, xp) + tm.assert_series_equal(result, expected) - rs = Series(mixed).str.rjust(5) - xp = Series( + result = s.str.rjust(5) + expected = Series( [ " a", np.nan, @@ -229,82 +254,95 @@ def 
test_center_ljust_rjust(): np.nan, ] ) - assert isinstance(rs, Series) - tm.assert_almost_equal(rs, xp) + tm.assert_series_equal(result, expected) -def test_center_ljust_rjust_fillchar(): - values = Series(["a", "bb", "cccc", "ddddd", "eeeeee"]) +def test_center_ljust_rjust_fillchar(any_string_dtype): + s = Series(["a", "bb", "cccc", "ddddd", "eeeeee"], dtype=any_string_dtype) - result = values.str.center(5, fillchar="X") - expected = Series(["XXaXX", "XXbbX", "Xcccc", "ddddd", "eeeeee"]) + result = s.str.center(5, fillchar="X") + expected = Series( + ["XXaXX", "XXbbX", "Xcccc", "ddddd", "eeeeee"], dtype=any_string_dtype + ) tm.assert_series_equal(result, expected) - expected = np.array([v.center(5, "X") for v in values.values], dtype=np.object_) - tm.assert_numpy_array_equal(result.values, expected) + expected = np.array([v.center(5, "X") for v in np.array(s)], dtype=np.object_) + tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected) - result = values.str.ljust(5, fillchar="X") - expected = Series(["aXXXX", "bbXXX", "ccccX", "ddddd", "eeeeee"]) + result = s.str.ljust(5, fillchar="X") + expected = Series( + ["aXXXX", "bbXXX", "ccccX", "ddddd", "eeeeee"], dtype=any_string_dtype + ) tm.assert_series_equal(result, expected) - expected = np.array([v.ljust(5, "X") for v in values.values], dtype=np.object_) - tm.assert_numpy_array_equal(result.values, expected) + expected = np.array([v.ljust(5, "X") for v in np.array(s)], dtype=np.object_) + tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected) - result = values.str.rjust(5, fillchar="X") - expected = Series(["XXXXa", "XXXbb", "Xcccc", "ddddd", "eeeeee"]) + result = s.str.rjust(5, fillchar="X") + expected = Series( + ["XXXXa", "XXXbb", "Xcccc", "ddddd", "eeeeee"], dtype=any_string_dtype + ) tm.assert_series_equal(result, expected) - expected = np.array([v.rjust(5, "X") for v in values.values], dtype=np.object_) - tm.assert_numpy_array_equal(result.values, expected) + expected = 
np.array([v.rjust(5, "X") for v in np.array(s)], dtype=np.object_) + tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected) - # If fillchar is not a charatter, normal str raises TypeError + +def test_center_ljust_rjust_fillchar_bad_arg_raises(any_string_dtype): + s = Series(["a", "bb", "cccc", "ddddd", "eeeeee"], dtype=any_string_dtype) + + # If fillchar is not a character, normal str raises TypeError # 'aaa'.ljust(5, 'XY') # TypeError: must be char, not str template = "fillchar must be a character, not {dtype}" with pytest.raises(TypeError, match=template.format(dtype="str")): - values.str.center(5, fillchar="XY") + s.str.center(5, fillchar="XY") with pytest.raises(TypeError, match=template.format(dtype="str")): - values.str.ljust(5, fillchar="XY") + s.str.ljust(5, fillchar="XY") with pytest.raises(TypeError, match=template.format(dtype="str")): - values.str.rjust(5, fillchar="XY") + s.str.rjust(5, fillchar="XY") with pytest.raises(TypeError, match=template.format(dtype="int")): - values.str.center(5, fillchar=1) + s.str.center(5, fillchar=1) with pytest.raises(TypeError, match=template.format(dtype="int")): - values.str.ljust(5, fillchar=1) + s.str.ljust(5, fillchar=1) with pytest.raises(TypeError, match=template.format(dtype="int")): - values.str.rjust(5, fillchar=1) + s.str.rjust(5, fillchar=1) -def test_zfill(): - values = Series(["1", "22", "aaa", "333", "45678"]) +def test_zfill(any_string_dtype): + s = Series(["1", "22", "aaa", "333", "45678"], dtype=any_string_dtype) - result = values.str.zfill(5) - expected = Series(["00001", "00022", "00aaa", "00333", "45678"]) + result = s.str.zfill(5) + expected = Series( + ["00001", "00022", "00aaa", "00333", "45678"], dtype=any_string_dtype + ) tm.assert_series_equal(result, expected) - expected = np.array([v.zfill(5) for v in values.values], dtype=np.object_) - tm.assert_numpy_array_equal(result.values, expected) + expected = np.array([v.zfill(5) for v in np.array(s)], dtype=np.object_) + 
tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected) - result = values.str.zfill(3) - expected = Series(["001", "022", "aaa", "333", "45678"]) + result = s.str.zfill(3) + expected = Series(["001", "022", "aaa", "333", "45678"], dtype=any_string_dtype) tm.assert_series_equal(result, expected) - expected = np.array([v.zfill(3) for v in values.values], dtype=np.object_) - tm.assert_numpy_array_equal(result.values, expected) + expected = np.array([v.zfill(3) for v in np.array(s)], dtype=np.object_) + tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected) - values = Series(["1", np.nan, "aaa", np.nan, "45678"]) - result = values.str.zfill(5) - expected = Series(["00001", np.nan, "00aaa", np.nan, "45678"]) + s = Series(["1", np.nan, "aaa", np.nan, "45678"], dtype=any_string_dtype) + result = s.str.zfill(5) + expected = Series( + ["00001", np.nan, "00aaa", np.nan, "45678"], dtype=any_string_dtype + ) tm.assert_series_equal(result, expected) -def test_wrap(): +def test_wrap(any_string_dtype): # test values are: two words less than width, two words equal to width, # two words greater than width, one word less than width, one word # equal to width, one word greater than width, multiple tokens with # trailing whitespace equal to width - values = Series( + s = Series( [ "hello world", "hello world!", @@ -315,11 +353,12 @@ def test_wrap(): "ab ab ab ab ", "ab ab ab ab a", "\t", - ] + ], + dtype=any_string_dtype, ) # expected values - xp = Series( + expected = Series( [ "hello world", "hello world!", @@ -330,15 +369,21 @@ def test_wrap(): "ab ab ab ab", "ab ab ab ab\na", "", - ] + ], + dtype=any_string_dtype, ) - rs = values.str.wrap(12, break_long_words=True) - tm.assert_series_equal(rs, xp) + result = s.str.wrap(12, break_long_words=True) + tm.assert_series_equal(result, expected) + - # test with pre and post whitespace (non-unicode), NaN, and non-ascii - # Unicode - values = Series([" pre ", np.nan, "\xac\u20ac\U00008000 abadcafe"]) - 
xp = Series([" pre", np.nan, "\xac\u20ac\U00008000 ab\nadcafe"]) - rs = values.str.wrap(6) - tm.assert_series_equal(rs, xp) +def test_wrap_unicode(any_string_dtype): + # test with pre and post whitespace (non-unicode), NaN, and non-ascii Unicode + s = Series( + [" pre ", np.nan, "\xac\u20ac\U00008000 abadcafe"], dtype=any_string_dtype + ) + expected = Series( + [" pre", np.nan, "\xac\u20ac\U00008000 ab\nadcafe"], dtype=any_string_dtype + ) + result = s.str.wrap(6) + tm.assert_series_equal(result, expected)
https://api.github.com/repos/pandas-dev/pandas/pulls/41420
2021-05-11T15:28:33Z
2021-05-12T13:30:18Z
2021-05-12T13:30:18Z
2021-05-12T13:37:41Z
[ArrowStringArray] TST: parametrize str.extractall tests
diff --git a/pandas/tests/strings/test_extract.py b/pandas/tests/strings/test_extract.py index c1564a5c256a1..83401de0e5443 100644 --- a/pandas/tests/strings/test_extract.py +++ b/pandas/tests/strings/test_extract.py @@ -358,8 +358,8 @@ def test_extract_single_group_returns_frame(): tm.assert_frame_equal(r, e) -def test_extractall(): - subject_list = [ +def test_extractall(any_string_dtype): + data = [ "dave@google.com", "tdhock5@gmail.com", "maudelaperriere@gmail.com", @@ -378,7 +378,7 @@ def test_extractall(): ("c", "d", "com"), ("e", "f", "com"), ] - named_pattern = r""" + pat = r""" (?P<user>[a-z0-9]+) @ (?P<domain>[a-z]+) @@ -386,20 +386,22 @@ def test_extractall(): (?P<tld>[a-z]{2,4}) """ expected_columns = ["user", "domain", "tld"] - S = Series(subject_list) - # extractall should return a DataFrame with one row for each - # match, indexed by the subject from which the match came. + s = Series(data, dtype=any_string_dtype) + # extractall should return a DataFrame with one row for each match, indexed by the + # subject from which the match came. 
expected_index = MultiIndex.from_tuples( [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 0), (4, 1), (4, 2)], names=(None, "match"), ) - expected_df = DataFrame(expected_tuples, expected_index, expected_columns) - computed_df = S.str.extractall(named_pattern, re.VERBOSE) - tm.assert_frame_equal(computed_df, expected_df) + expected = DataFrame( + expected_tuples, expected_index, expected_columns, dtype=any_string_dtype + ) + result = s.str.extractall(pat, flags=re.VERBOSE) + tm.assert_frame_equal(result, expected) - # The index of the input Series should be used to construct - # the index of the output DataFrame: - series_index = MultiIndex.from_tuples( + # The index of the input Series should be used to construct the index of the output + # DataFrame: + mi = MultiIndex.from_tuples( [ ("single", "Dave"), ("single", "Toby"), @@ -410,7 +412,7 @@ def test_extractall(): ("none", "empty"), ] ) - Si = Series(subject_list, series_index) + s = Series(data, index=mi, dtype=any_string_dtype) expected_index = MultiIndex.from_tuples( [ ("single", "Dave", 0), @@ -424,67 +426,80 @@ def test_extractall(): ], names=(None, None, "match"), ) - expected_df = DataFrame(expected_tuples, expected_index, expected_columns) - computed_df = Si.str.extractall(named_pattern, re.VERBOSE) - tm.assert_frame_equal(computed_df, expected_df) + expected = DataFrame( + expected_tuples, expected_index, expected_columns, dtype=any_string_dtype + ) + result = s.str.extractall(pat, flags=re.VERBOSE) + tm.assert_frame_equal(result, expected) # MultiIndexed subject with names. 
- Sn = Series(subject_list, series_index) - Sn.index.names = ("matches", "description") + s = Series(data, index=mi, dtype=any_string_dtype) + s.index.names = ("matches", "description") expected_index.names = ("matches", "description", "match") - expected_df = DataFrame(expected_tuples, expected_index, expected_columns) - computed_df = Sn.str.extractall(named_pattern, re.VERBOSE) - tm.assert_frame_equal(computed_df, expected_df) - - # optional groups. - subject_list = ["", "A1", "32"] - named_pattern = "(?P<letter>[AB])?(?P<number>[123])" - computed_df = Series(subject_list).str.extractall(named_pattern) - expected_index = MultiIndex.from_tuples( - [(1, 0), (2, 0), (2, 1)], names=(None, "match") - ) - expected_df = DataFrame( - [("A", "1"), (np.nan, "3"), (np.nan, "2")], - expected_index, - columns=["letter", "number"], + expected = DataFrame( + expected_tuples, expected_index, expected_columns, dtype=any_string_dtype ) - tm.assert_frame_equal(computed_df, expected_df) + result = s.str.extractall(pat, flags=re.VERBOSE) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "pat,expected_names", + [ + # optional groups. + ("(?P<letter>[AB])?(?P<number>[123])", ["letter", "number"]), + # only one of two groups has a name. + ("([AB])?(?P<number>[123])", [0, "number"]), + ], +) +def test_extractall_column_names(pat, expected_names, any_string_dtype): + s = Series(["", "A1", "32"], dtype=any_string_dtype) - # only one of two groups has a name. 
- pattern = "([AB])?(?P<number>[123])" - computed_df = Series(subject_list).str.extractall(pattern) - expected_df = DataFrame( + result = s.str.extractall(pat) + expected = DataFrame( [("A", "1"), (np.nan, "3"), (np.nan, "2")], - expected_index, - columns=[0, "number"], + index=MultiIndex.from_tuples([(1, 0), (2, 0), (2, 1)], names=(None, "match")), + columns=expected_names, + dtype=any_string_dtype, ) - tm.assert_frame_equal(computed_df, expected_df) + tm.assert_frame_equal(result, expected) -def test_extractall_single_group(): - # extractall(one named group) returns DataFrame with one named - # column. - s = Series(["a3", "b3", "d4c2"], name="series_name") - r = s.str.extractall(r"(?P<letter>[a-z])") - i = MultiIndex.from_tuples([(0, 0), (1, 0), (2, 0), (2, 1)], names=(None, "match")) - e = DataFrame({"letter": ["a", "b", "d", "c"]}, i) - tm.assert_frame_equal(r, e) +def test_extractall_single_group(any_string_dtype): + s = Series(["a3", "b3", "d4c2"], name="series_name", dtype=any_string_dtype) + expected_index = MultiIndex.from_tuples( + [(0, 0), (1, 0), (2, 0), (2, 1)], names=(None, "match") + ) - # extractall(one un-named group) returns DataFrame with one - # un-named column. - r = s.str.extractall(r"([a-z])") - e = DataFrame(["a", "b", "d", "c"], i) - tm.assert_frame_equal(r, e) + # extractall(one named group) returns DataFrame with one named column. + result = s.str.extractall(r"(?P<letter>[a-z])") + expected = DataFrame( + {"letter": ["a", "b", "d", "c"]}, index=expected_index, dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) + + # extractall(one un-named group) returns DataFrame with one un-named column. 
+ result = s.str.extractall(r"([a-z])") + expected = DataFrame( + ["a", "b", "d", "c"], index=expected_index, dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) -def test_extractall_single_group_with_quantifier(): - # extractall(one un-named group with quantifier) returns - # DataFrame with one un-named column (GH13382). - s = Series(["ab3", "abc3", "d4cd2"], name="series_name") - r = s.str.extractall(r"([a-z]+)") - i = MultiIndex.from_tuples([(0, 0), (1, 0), (2, 0), (2, 1)], names=(None, "match")) - e = DataFrame(["ab", "abc", "d", "cd"], i) - tm.assert_frame_equal(r, e) +def test_extractall_single_group_with_quantifier(any_string_dtype): + # GH#13382 + # extractall(one un-named group with quantifier) returns DataFrame with one un-named + # column. + s = Series(["ab3", "abc3", "d4cd2"], name="series_name", dtype=any_string_dtype) + result = s.str.extractall(r"([a-z]+)") + expected = DataFrame( + ["ab", "abc", "d", "cd"], + index=MultiIndex.from_tuples( + [(0, 0), (1, 0), (2, 0), (2, 1)], names=(None, "match") + ), + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( @@ -500,78 +515,91 @@ def test_extractall_single_group_with_quantifier(): (["a3", "b3", "d4c2"], ("i1", "i2")), ], ) -def test_extractall_no_matches(data, names): +def test_extractall_no_matches(data, names, any_string_dtype): # GH19075 extractall with no matches should return a valid MultiIndex n = len(data) if len(names) == 1: - i = Index(range(n), name=names[0]) + index = Index(range(n), name=names[0]) else: - a = (tuple([i] * (n - 1)) for i in range(n)) - i = MultiIndex.from_tuples(a, names=names) - s = Series(data, name="series_name", index=i, dtype="object") - ei = MultiIndex.from_tuples([], names=(names + ("match",))) + tuples = (tuple([i] * (n - 1)) for i in range(n)) + index = MultiIndex.from_tuples(tuples, names=names) + s = Series(data, name="series_name", index=index, dtype=any_string_dtype) + expected_index = 
MultiIndex.from_tuples([], names=(names + ("match",))) # one un-named group. - r = s.str.extractall("(z)") - e = DataFrame(columns=[0], index=ei) - tm.assert_frame_equal(r, e) + result = s.str.extractall("(z)") + expected = DataFrame(columns=[0], index=expected_index, dtype=any_string_dtype) + tm.assert_frame_equal(result, expected) # two un-named groups. - r = s.str.extractall("(z)(z)") - e = DataFrame(columns=[0, 1], index=ei) - tm.assert_frame_equal(r, e) + result = s.str.extractall("(z)(z)") + expected = DataFrame(columns=[0, 1], index=expected_index, dtype=any_string_dtype) + tm.assert_frame_equal(result, expected) # one named group. - r = s.str.extractall("(?P<first>z)") - e = DataFrame(columns=["first"], index=ei) - tm.assert_frame_equal(r, e) + result = s.str.extractall("(?P<first>z)") + expected = DataFrame( + columns=["first"], index=expected_index, dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) # two named groups. - r = s.str.extractall("(?P<first>z)(?P<second>z)") - e = DataFrame(columns=["first", "second"], index=ei) - tm.assert_frame_equal(r, e) + result = s.str.extractall("(?P<first>z)(?P<second>z)") + expected = DataFrame( + columns=["first", "second"], index=expected_index, dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) # one named, one un-named. 
- r = s.str.extractall("(z)(?P<second>z)") - e = DataFrame(columns=[0, "second"], index=ei) - tm.assert_frame_equal(r, e) + result = s.str.extractall("(z)(?P<second>z)") + expected = DataFrame( + columns=[0, "second"], index=expected_index, dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) -def test_extractall_stringindex(): - s = Series(["a1a2", "b1", "c1"], name="xxx") - res = s.str.extractall(r"[ab](?P<digit>\d)") - exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)], names=[None, "match"]) - exp = DataFrame({"digit": ["1", "2", "1"]}, index=exp_idx) - tm.assert_frame_equal(res, exp) +def test_extractall_stringindex(any_string_dtype): + s = Series(["a1a2", "b1", "c1"], name="xxx", dtype=any_string_dtype) + result = s.str.extractall(r"[ab](?P<digit>\d)") + expected = DataFrame( + {"digit": ["1", "2", "1"]}, + index=MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)], names=[None, "match"]), + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) - # index should return the same result as the default index without name - # thus index.name doesn't affect to the result - for idx in [ - Index(["a1a2", "b1", "c1"]), - Index(["a1a2", "b1", "c1"], name="xxx"), - ]: + # index should return the same result as the default index without name thus + # index.name doesn't affect to the result + if any_string_dtype == "object": + for idx in [ + Index(["a1a2", "b1", "c1"]), + Index(["a1a2", "b1", "c1"], name="xxx"), + ]: - res = idx.str.extractall(r"[ab](?P<digit>\d)") - tm.assert_frame_equal(res, exp) + result = idx.str.extractall(r"[ab](?P<digit>\d)") + tm.assert_frame_equal(result, expected) s = Series( ["a1a2", "b1", "c1"], name="s_name", index=Index(["XX", "yy", "zz"], name="idx_name"), + dtype=any_string_dtype, ) - res = s.str.extractall(r"[ab](?P<digit>\d)") - exp_idx = MultiIndex.from_tuples( - [("XX", 0), ("XX", 1), ("yy", 0)], names=["idx_name", "match"] + result = s.str.extractall(r"[ab](?P<digit>\d)") + expected = DataFrame( + 
{"digit": ["1", "2", "1"]}, + index=MultiIndex.from_tuples( + [("XX", 0), ("XX", 1), ("yy", 0)], names=["idx_name", "match"] + ), + dtype=any_string_dtype, ) - exp = DataFrame({"digit": ["1", "2", "1"]}, index=exp_idx) - tm.assert_frame_equal(res, exp) + tm.assert_frame_equal(result, expected) -def test_extractall_errors(): - # Does not make sense to use extractall with a regex that has - # no capture groups. (it returns DataFrame with one column for - # each capture group) - s = Series(["a3", "b3", "d4c2"], name="series_name") +def test_extractall_no_capture_groups_raises(any_string_dtype): + # Does not make sense to use extractall with a regex that has no capture groups. + # (it returns DataFrame with one column for each capture group) + s = Series(["a3", "b3", "d4c2"], name="series_name", dtype=any_string_dtype) with pytest.raises(ValueError, match="no capture groups"): s.str.extractall(r"[a-z]") @@ -591,8 +619,8 @@ def test_extract_index_one_two_groups(): tm.assert_frame_equal(r, e) -def test_extractall_same_as_extract(): - s = Series(["a3", "b3", "c2"], name="series_name") +def test_extractall_same_as_extract(any_string_dtype): + s = Series(["a3", "b3", "c2"], name="series_name", dtype=any_string_dtype) pattern_two_noname = r"([a-z])([0-9])" extract_two_noname = s.str.extract(pattern_two_noname, expand=True) @@ -619,13 +647,13 @@ def test_extractall_same_as_extract(): tm.assert_frame_equal(extract_one_noname, no_multi_index) -def test_extractall_same_as_extract_subject_index(): +def test_extractall_same_as_extract_subject_index(any_string_dtype): # same as above tests, but s has an MultiIndex. 
- i = MultiIndex.from_tuples( + mi = MultiIndex.from_tuples( [("A", "first"), ("B", "second"), ("C", "third")], names=("capital", "ordinal"), ) - s = Series(["a3", "b3", "c2"], i, name="series_name") + s = Series(["a3", "b3", "c2"], index=mi, name="series_name", dtype=any_string_dtype) pattern_two_noname = r"([a-z])([0-9])" extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
https://api.github.com/repos/pandas-dev/pandas/pulls/41419
2021-05-11T14:01:24Z
2021-05-11T21:50:46Z
2021-05-11T21:50:46Z
2021-05-12T16:01:17Z
[ArrowStringArray] REF: str.extract argument validation
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index 55a12a301c6e6..2646ddfa45b58 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -2,17 +2,20 @@ from functools import wraps import re from typing import ( + TYPE_CHECKING, Dict, Hashable, List, Optional, Pattern, + Union, ) import warnings import numpy as np import pandas._libs.lib as lib +from pandas._typing import FrameOrSeriesUnion from pandas.util._decorators import Appender from pandas.core.dtypes.common import ( @@ -33,6 +36,9 @@ from pandas.core.base import NoNewAttributesMixin +if TYPE_CHECKING: + from pandas import Index + _shared_docs: Dict[str, str] = {} _cpython_optimized_encoders = ( "utf-8", @@ -2276,7 +2282,9 @@ def findall(self, pat, flags=0): return self._wrap_result(result, returns_string=False) @forbid_nonstring_types(["bytes"]) - def extract(self, pat, flags=0, expand=True): + def extract( + self, pat: str, flags: int = 0, expand: bool = True + ) -> Union[FrameOrSeriesUnion, "Index"]: r""" Extract capture groups in the regex `pat` as columns in a DataFrame. 
@@ -2357,6 +2365,16 @@ def extract(self, pat, flags=0, expand=True): 2 NaN dtype: object """ + if not isinstance(expand, bool): + raise ValueError("expand must be True or False") + + regex = re.compile(pat, flags=flags) + if regex.groups == 0: + raise ValueError("pattern contains no capture groups") + + if not expand and regex.groups > 1 and isinstance(self._data, ABCIndex): + raise ValueError("only one regex group is supported with Index") + # TODO: dispatch return str_extract(self, pat, flags, expand=expand) @@ -3010,8 +3028,6 @@ def cat_core(list_of_columns: List, sep: str): def _groups_or_na_fun(regex): """Used in both extract_noexpand and extract_frame""" - if regex.groups == 0: - raise ValueError("pattern contains no capture groups") empty_row = [np.nan] * regex.groups def f(x): @@ -3086,8 +3102,6 @@ def _str_extract_noexpand(arr, pat, flags=0): # not dispatching, so we have to reconstruct here. result = pd_array(result, dtype=result_dtype) else: - if isinstance(arr, ABCIndex): - raise ValueError("only one regex group is supported with Index") name = None columns = _get_group_names(regex) if arr.size == 0: @@ -3138,8 +3152,6 @@ def _str_extract_frame(arr, pat, flags=0): def str_extract(arr, pat, flags=0, expand=True): - if not isinstance(expand, bool): - raise ValueError("expand must be True or False") if expand: result = _str_extract_frame(arr._orig, pat, flags=flags) return result.__finalize__(arr._orig, method="str_extract")
https://api.github.com/repos/pandas-dev/pandas/pulls/41418
2021-05-11T11:48:32Z
2021-05-12T13:30:56Z
2021-05-12T13:30:56Z
2021-05-12T13:34:03Z
[ArrowStringArray] REF: extract/extractall column names
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index 9f8c9fa2f0515..55a12a301c6e6 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -3,8 +3,10 @@ import re from typing import ( Dict, + Hashable, List, Optional, + Pattern, ) import warnings @@ -3036,13 +3038,31 @@ def _result_dtype(arr): return object -def _get_single_group_name(rx): - try: - return list(rx.groupindex.keys()).pop() - except IndexError: +def _get_single_group_name(regex: Pattern) -> Hashable: + if regex.groupindex: + return next(iter(regex.groupindex)) + else: return None +def _get_group_names(regex: Pattern) -> List[Hashable]: + """ + Get named groups from compiled regex. + + Unnamed groups are numbered. + + Parameters + ---------- + regex : compiled regex + + Returns + ------- + list of column labels + """ + names = {v: k for k, v in regex.groupindex.items()} + return [names.get(1 + i, i) for i in range(regex.groups)] + + def _str_extract_noexpand(arr, pat, flags=0): """ Find groups in each string in the Series using passed regular @@ -3069,8 +3089,7 @@ def _str_extract_noexpand(arr, pat, flags=0): if isinstance(arr, ABCIndex): raise ValueError("only one regex group is supported with Index") name = None - names = dict(zip(regex.groupindex.values(), regex.groupindex.keys())) - columns = [names.get(1 + i, i) for i in range(regex.groups)] + columns = _get_group_names(regex) if arr.size == 0: # error: Incompatible types in assignment (expression has type # "DataFrame", variable has type "ndarray") @@ -3101,8 +3120,7 @@ def _str_extract_frame(arr, pat, flags=0): regex = re.compile(pat, flags=flags) groups_or_na = _groups_or_na_fun(regex) - names = dict(zip(regex.groupindex.values(), regex.groupindex.keys())) - columns = [names.get(1 + i, i) for i in range(regex.groups)] + columns = _get_group_names(regex) if len(arr) == 0: return DataFrame(columns=columns, dtype=object) @@ -3139,8 +3157,7 @@ def str_extractall(arr, pat, flags=0): if 
isinstance(arr, ABCIndex): arr = arr.to_series().reset_index(drop=True) - names = dict(zip(regex.groupindex.values(), regex.groupindex.keys())) - columns = [names.get(1 + i, i) for i in range(regex.groups)] + columns = _get_group_names(regex) match_list = [] index_list = [] is_mi = arr.index.nlevels > 1
https://api.github.com/repos/pandas-dev/pandas/pulls/41417
2021-05-11T10:43:19Z
2021-05-12T01:07:02Z
2021-05-12T01:07:02Z
2021-05-12T08:39:43Z
CLN: fix numpy FutureWarning in tests
diff --git a/pandas/tests/arithmetic/test_object.py b/pandas/tests/arithmetic/test_object.py index c5e086d24ec0c..1961a2d9d89f8 100644 --- a/pandas/tests/arithmetic/test_object.py +++ b/pandas/tests/arithmetic/test_object.py @@ -311,7 +311,7 @@ def test_sub_object(self): index - "foo" with pytest.raises(TypeError, match=msg): - index - np.array([2, "foo"]) + index - np.array([2, "foo"], dtype=object) def test_rsub_object(self): # GH#19369 diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 076cc155f3626..d16dda370c498 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -1042,7 +1042,7 @@ def test_infer_dtype_datetime64_with_na(self, na_value): np.array([np.datetime64("2011-01-01"), Timestamp("2011-01-02")]), np.array([Timestamp("2011-01-02"), np.datetime64("2011-01-01")]), np.array([np.nan, Timestamp("2011-01-02"), 1.1]), - np.array([np.nan, "2011-01-01", Timestamp("2011-01-02")]), + np.array([np.nan, "2011-01-01", Timestamp("2011-01-02")], dtype=object), np.array([np.datetime64("nat"), np.timedelta64(1, "D")], dtype=object), np.array([np.timedelta64(1, "D"), np.datetime64("nat")], dtype=object), ], diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 0b2a4cfb94d18..93c95b3004876 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -163,6 +163,5 @@ def test_serializable(obj): class TestIsBoolIndexer: def test_non_bool_array_with_na(self): # in particular, this should not raise - arr = np.array(["A", "B", np.nan]) - + arr = np.array(["A", "B", np.nan], dtype=object) assert not com.is_bool_indexer(arr) diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index 3e0c12c6a22cc..eb38f2f95d2d5 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -1899,7 +1899,10 @@ def test_to_datetime_infer_datetime_format_inconsistent_format(self, cache): 
@pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_infer_datetime_format_series_with_nans(self, cache): s = Series( - np.array(["01/01/2011 00:00:00", np.nan, "01/03/2011 00:00:00", np.nan]) + np.array( + ["01/01/2011 00:00:00", np.nan, "01/03/2011 00:00:00", np.nan], + dtype=object, + ) ) tm.assert_series_equal( to_datetime(s, infer_datetime_format=False, cache=cache), @@ -1916,7 +1919,8 @@ def test_to_datetime_infer_datetime_format_series_start_with_nans(self, cache): "01/01/2011 00:00:00", "01/02/2011 00:00:00", "01/03/2011 00:00:00", - ] + ], + dtype=object, ) ) diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py index e373323dfb6e1..8ce24dc963dc5 100644 --- a/pandas/tests/util/test_hashing.py +++ b/pandas/tests/util/test_hashing.py @@ -90,7 +90,7 @@ def test_hash_array(series): @pytest.mark.parametrize( - "arr2", [np.array([3, 4, "All"]), np.array([3, 4, "All"], dtype=object)] + "arr2", [np.array([3, 4, "All"], dtype="U"), np.array([3, 4, "All"], dtype=object)] ) def test_hash_array_mixed(arr2): result1 = hash_array(np.array(["3", "4", "All"]))
https://api.github.com/repos/pandas-dev/pandas/pulls/41414
2021-05-11T02:32:54Z
2021-05-12T01:06:21Z
2021-05-12T01:06:21Z
2022-11-18T02:21:38Z
BUG: able to create nonexistent Timestamp
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 5adc8540e6864..c0badcb4a22e7 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -612,6 +612,7 @@ Other API changes - Partially initialized :class:`CategoricalDtype` (i.e. those with ``categories=None`` objects will no longer compare as equal to fully initialized dtype objects. - Accessing ``_constructor_expanddim`` on a :class:`DataFrame` and ``_constructor_sliced`` on a :class:`Series` now raise an ``AttributeError``. Previously a ``NotImplementedError`` was raised (:issue:`38782`) - Added new ``engine`` and ``**engine_kwargs`` parameters to :meth:`DataFrame.to_sql` to support other future "SQL engines". Currently we still only use ``SQLAlchemy`` under the hood, but more engines are planned to be supported such as ``turbodbc`` (:issue:`36893`) +- Invocation of methods to check whether objects created through ```Timestamp`` are nonexistent times and raising ``NonExistentTimeError`` (:issue:`41072`) Build ===== diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index a4f764878d19e..0773e773793f6 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -1280,6 +1280,9 @@ class Timestamp(_Timestamp): elif not is_offset_object(freq): freq = to_offset(freq) + naive_ts = convert_to_tsobject(ts_input, None, unit, 0, 0, nanosecond or 0) + tz_localize_to_utc_single(naive_ts.value, ts.tzinfo, False, "raise") + return create_timestamp_from_ts(ts.value, ts.dts, ts.tzinfo, freq, ts.fold) def _round(self, freq, mode, ambiguous='raise', nonexistent='raise'): diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py index 54f3f21dc9f6f..87c9cc9b1dad7 100644 --- a/pandas/tests/scalar/timestamp/test_timestamp.py +++ b/pandas/tests/scalar/timestamp/test_timestamp.py @@ -403,6 +403,13 @@ def test_tz_conversion_freq(self, tz_naive_fixture): t2 = 
Timestamp("2019-01-02 12:00", tz="UTC", freq="T") assert t2.tz_convert(tz="UTC").freq == t2.freq + def test_new_nonexistent_error(self): + msg = "2001-10-14 00:30:00" + with pytest.raises(pytz.NonExistentTimeError, match=msg): + Timestamp( + year=2001, month=10, day=14, hour=0, minute=30, tz="America/Santiago" + ) + class TestTimestampNsOperations: def test_nanosecond_string_parsing(self):
- [X] closes #41072 - [X] tests added / passed - [X] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41413
2021-05-11T01:28:57Z
2021-08-17T00:52:31Z
null
2021-08-17T00:52:31Z
API: allow nan-likes in StringArray constructor
diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py index 32fbf4e6c7de3..3dbc74b1941d2 100644 --- a/asv_bench/benchmarks/strings.py +++ b/asv_bench/benchmarks/strings.py @@ -3,10 +3,12 @@ import numpy as np from pandas import ( + NA, Categorical, DataFrame, Series, ) +from pandas.core.arrays import StringArray from .pandas_vb_common import tm @@ -285,3 +287,18 @@ class Iter(Dtypes): def time_iter(self, dtype): for i in self.s: pass + + +class StringArrayConstruction: + def setup(self): + self.series_arr = tm.rands_array(nchars=10, size=10 ** 5) + self.series_arr_nan = np.concatenate([self.series_arr, np.array([NA] * 1000)]) + + def time_string_array_construction(self): + StringArray(self.series_arr) + + def time_string_array_with_nan_construction(self): + StringArray(self.series_arr_nan) + + def peakmem_stringarray_construction(self): + StringArray(self.series_arr) diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index ccad93d83eb5b..d015c7fa39e83 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -459,6 +459,7 @@ Other API changes - Change in the position of the ``min_rows`` argument in :meth:`DataFrame.to_string` due to change in the docstring (:issue:`44304`) - Reduction operations for :class:`DataFrame` or :class:`Series` now raising a ``ValueError`` when ``None`` is passed for ``skipna`` (:issue:`44178`) - :func:`read_csv` and :func:`read_html` no longer raising an error when one of the header rows consists only of ``Unnamed:`` columns (:issue:`13054`) +- :class:`StringArray` now accepts nan-likes (``None``, ``np.nan``) for the ``values`` parameter in its constructor in addition to strings and :attr:`pandas.NA`. 
(:issue:`40839`) - Changed the ``name`` attribute of several holidays in ``USFederalHolidayCalendar`` to match `official federal holiday names <https://www.opm.gov/policy-data-oversight/pay-leave/federal-holidays/>`_ diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi index a7ebd9d0c77ad..5f6d8c8071f4a 100644 --- a/pandas/_libs/lib.pyi +++ b/pandas/_libs/lib.pyi @@ -150,7 +150,7 @@ def maybe_convert_numeric( def ensure_string_array( arr, na_value: object = ..., - convert_na_value: bool = ..., + coerce: str = ..., copy: bool = ..., skipna: bool = ..., ) -> npt.NDArray[np.object_]: ... diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 8f9016e726f1e..e373c8a584913 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -99,6 +99,7 @@ from pandas._libs.missing cimport ( is_null_timedelta64, isnaobj, ) +from pandas._libs.missing import checknull from pandas._libs.tslibs.conversion cimport convert_to_tsobject from pandas._libs.tslibs.nattype cimport ( NPY_NAT, @@ -670,12 +671,25 @@ def astype_intsafe(ndarray[object] arr, cnp.dtype new_dtype) -> ndarray: return result +ctypedef enum coerce_options: + all = 0 + strict_null = 1 + null = 2 + non_null = 3 + none = 4 + + +def strict_check_null(x): + # Cython doesn't let me define this in ensure_string_array :( + return x is None or x is C_NA or util.is_nan(x) + + @cython.wraparound(False) @cython.boundscheck(False) cpdef ndarray[object] ensure_string_array( arr, object na_value=np.nan, - bint convert_na_value=True, + coerce="all", bint copy=True, bint skipna=True, ): @@ -688,8 +702,16 @@ cpdef ndarray[object] ensure_string_array( The values to be converted to str, if needed. na_value : Any, default np.nan The value to use for na. For example, np.nan or pd.NA. - convert_na_value : bool, default True - If False, existing na values will be used unchanged in the new array. + coerce : {'all', 'strict-null', 'null', 'non-null', None}, default 'all' + Whether to coerce non-string elements to strings. 
+ - 'all' will convert all non-string values. + - 'strict-null' will only convert pd.NA, np.nan, or None to na_value + raising when encountering non-strings and other null values. + - 'null' will convert nulls to na_value w/out converting other non-strings. + - 'non-null' will only convert non-null non-string elements to string. + - None will not convert anything. + If coerce is not 'all', a ValueError will be raised for values + that are not strings or na_value. copy : bool, default True Whether to ensure that a new array is returned. skipna : bool, default True @@ -699,10 +721,47 @@ cpdef ndarray[object] ensure_string_array( Returns ------- np.ndarray[object] - An array with the input array's elements casted to str or nan-like. + An array of strings and na_value. + + Raises + ------ + ValueError + If an element is encountered that is not a string or valid NA value + and element is not coerced. + + Examples + -------- + >>> import numpy as np + >>> import pandas as pd + >>> ensure_string_array(np.array([1,2,3, np.datetime64("nat")]), coerce="all") + array(['1', '2', '3', nan], dtype=object) + >>> ensure_string_array(np.array([pd.NA, "a", None]), coerce="strict-null") + array([nan, 'a', nan], dtype=object) + >>> ensure_string_array(np.array([pd.NaT, "1"]), coerce="null") + array([nan, '1'], dtype=object) + >>> ensure_string_array(np.array([1,2,3]), coerce="non-null") + array(['1', '2', '3'], dtype=object) + >>> ensure_string_array(np.array(["1", "2", "3"]), coerce=None) + array(['1', '2', '3'], dtype=object) """ cdef: Py_ssize_t i = 0, n = len(arr) + set strict_na_values = {C_NA, np.nan, None} + coerce_options coerce_val + + if coerce == "all": + coerce_val = all + elif coerce == "strict-null": + coerce_val = strict_null + elif coerce == "null": + coerce_val = null + elif coerce == "non-null": + coerce_val = non_null + elif coerce is None: + coerce_val = none + else: + raise ValueError("coerce argument must be one of " + 
f"'all'|'strict-null'|'null'|'non-null'|None, not {coerce}") if hasattr(arr, "to_numpy"): @@ -722,21 +781,34 @@ cpdef ndarray[object] ensure_string_array( if copy and result is arr: result = result.copy() + if coerce_val == strict_null: + # We don't use checknull, since NaT, Decimal("NaN"), etc. aren't valid + # If they are present, they are treated like a regular Python object + # and will either cause an exception to be raised or be coerced. + check_null = strict_check_null + else: + check_null = checknull + for i in range(n): val = arr[i] if isinstance(val, str): continue - if not checknull(val): - if not isinstance(val, np.floating): - # f"{val}" is faster than str(val) - result[i] = f"{val}" + if not check_null(val): + if coerce_val == all or coerce_val == non_null: + if not isinstance(val, np.floating): + # f"{val}" is faster than str(val) + result[i] = f"{val}" + else: + # f"{val}" is not always equivalent to str(val) for floats + result[i] = str(val) else: - # f"{val}" is not always equivalent to str(val) for floats - result[i] = str(val) + raise ValueError(f"Element {val} is not a string or valid null." 
+ "If you want it to be coerced to a string," + "specify coerce='all'") else: - if convert_na_value: + if coerce_val != non_null and coerce_val != none: val = na_value if skipna: result[i] = val @@ -1881,8 +1953,8 @@ cdef class StringValidator(Validator): return issubclass(self.dtype.type, np.str_) cdef bint is_valid_null(self, object value) except -1: - # We deliberately exclude None / NaN here since StringArray uses NA - return value is C_NA + # Override to exclude float('Nan') and complex NaN + return value is None or value is C_NA or value is np.nan cpdef bint is_string_array(ndarray values, bint skipna=False): diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index c6987d9a11e4c..8fe5343e471ae 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -246,11 +246,18 @@ class StringArray(BaseStringArray, PandasArray): .. warning:: Currently, this expects an object-dtype ndarray - where the elements are Python strings or :attr:`pandas.NA`. + where the elements are Python strings + or nan-likes(``None``, ``np.nan``, ``NA``). This may change without warning in the future. Use :meth:`pandas.array` with ``dtype="string"`` for a stable way of creating a `StringArray` from any sequence. + .. versionchanged:: 1.4.0 + + StringArray now accepts nan-likes(``None``, ``np.nan``) for the + ``values`` parameter in its constructor + in addition to strings and :attr:`pandas.NA` + copy : bool, default False Whether to copy the array of data. 
@@ -310,6 +317,8 @@ def __init__(self, values, copy=False): values = extract_array(values) super().__init__(values, copy=copy) + if not isinstance(values, type(self)): + self._validate() # error: Incompatible types in assignment (expression has type "StringDtype", # variable has type "PandasDtype") NDArrayBacked.__init__(self, self._ndarray, StringDtype(storage="python")) @@ -318,16 +327,25 @@ def __init__(self, values, copy=False): def _validate(self): """Validate that we only store NA or strings.""" - if len(self._ndarray) and not lib.is_string_array(self._ndarray, skipna=True): - raise ValueError("StringArray requires a sequence of strings or pandas.NA") if self._ndarray.dtype != "object": raise ValueError( "StringArray requires a sequence of strings or pandas.NA. Got " f"'{self._ndarray.dtype}' dtype instead." ) + try: + lib.ensure_string_array( + self._ndarray.ravel("K"), + na_value=StringDtype.na_value, + coerce="strict-null", + copy=False, + ) + except ValueError: + raise ValueError("StringArray requires a sequence of strings or pandas.NA") @classmethod - def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy=False): + def _from_sequence( + cls, scalars, *, dtype: Dtype | None = None, copy=False, coerce=True + ): if dtype and not (isinstance(dtype, str) and dtype == "string"): dtype = pandas_dtype(dtype) assert isinstance(dtype, StringDtype) and dtype.storage == "python" @@ -336,15 +354,23 @@ def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy=False): if isinstance(scalars, BaseMaskedArray): # avoid costly conversion to object dtype + if coerce: + coerce = "non-null" + else: + coerce = None na_values = scalars._mask result = scalars._data - result = lib.ensure_string_array(result, copy=copy, convert_na_value=False) + result = lib.ensure_string_array(result, copy=copy, coerce=coerce) result[na_values] = StringDtype.na_value else: # convert non-na-likes to str, and nan-likes to StringDtype.na_value + if coerce: + coerce = "all" + 
else: + coerce = "strict-null" result = lib.ensure_string_array( - scalars, na_value=StringDtype.na_value, copy=copy + scalars, na_value=StringDtype.na_value, copy=copy, coerce=coerce ) # Manually creating new array avoids the validation step in the __init__, so is diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index 53fc38a973110..7a4ca57bf14ef 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -153,7 +153,9 @@ def __init__(self, values): ) @classmethod - def _from_sequence(cls, scalars, dtype: Dtype | None = None, copy: bool = False): + def _from_sequence( + cls, scalars, dtype: Dtype | None = None, copy: bool = False, coerce=True + ): from pandas.core.arrays.masked import BaseMaskedArray _chk_pyarrow_available() @@ -167,11 +169,19 @@ def _from_sequence(cls, scalars, dtype: Dtype | None = None, copy: bool = False) # numerical issues with Float32Dtype na_values = scalars._mask result = scalars._data - result = lib.ensure_string_array(result, copy=copy, convert_na_value=False) + if coerce: + coerce = "non-null" + else: + coerce = None + result = lib.ensure_string_array(result, copy=copy, coerce=coerce) return cls(pa.array(result, mask=na_values, type=pa.string())) # convert non-na-likes to str - result = lib.ensure_string_array(scalars, copy=copy) + if coerce: + coerce = "all" + else: + coerce = "strict-null" + result = lib.ensure_string_array(scalars, copy=copy, coerce=coerce) return cls(pa.array(result, type=pa.string(), from_pandas=True)) @classmethod diff --git a/pandas/core/construction.py b/pandas/core/construction.py index cf8cd070ec562..2643625aa31b4 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -754,7 +754,7 @@ def _try_cast( elif dtype.kind == "U": # TODO: test cases with arr.dtype.kind in ["m", "M"] - return lib.ensure_string_array(arr, convert_na_value=False, copy=copy) + return lib.ensure_string_array(arr, coerce="non-null", copy=copy) elif 
dtype.kind in ["m", "M"]: return maybe_cast_to_datetime(arr, dtype) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index b70ea9f816aef..386fb4744ceb3 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1122,7 +1122,7 @@ def astype_nansafe( return arr.astype(dtype, copy=copy) if issubclass(dtype.type, str): - return lib.ensure_string_array(arr, skipna=skipna, convert_na_value=False) + return lib.ensure_string_array(arr, skipna=skipna, coerce="non-null") elif is_datetime64_dtype(arr): # Non-overlapping equality check (left operand type: "dtype[Any]", right diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index 7c3a8c691b786..10ff1c12d6fa8 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -2,15 +2,19 @@ This module tests the functionality of StringArray and ArrowStringArray. Tests for the str accessors are in pandas/tests/strings/test_string_array.py """ +from decimal import Decimal + import numpy as np import pytest +import pandas._libs.lib as lib import pandas.util._test_decorators as td from pandas.core.dtypes.common import is_dtype_equal import pandas as pd import pandas._testing as tm +from pandas.core.arrays import BaseMaskedArray from pandas.core.arrays.string_arrow import ArrowStringArray @@ -267,13 +271,63 @@ def test_constructor_raises(cls): cls(np.array([])) with pytest.raises(ValueError, match=msg): - cls(np.array(["a", np.nan], dtype=object)) + cls(np.array(["a", pd.NaT], dtype=object)) - with pytest.raises(ValueError, match=msg): - cls(np.array(["a", None], dtype=object)) - with pytest.raises(ValueError, match=msg): - cls(np.array(["a", pd.NaT], dtype=object)) +@pytest.mark.parametrize("na", [np.nan, np.float64("nan"), float("nan"), None, pd.NA]) +def test_constructor_nan_like(na): + expected = pd.arrays.StringArray(np.array(["a", pd.NA])) + tm.assert_extension_array_equal( + 
pd.arrays.StringArray(np.array(["a", na], dtype="object")), expected + ) + + +def test_invalid_coerce_raises(): + data = np.array(["a", "b"], dtype=object) + with pytest.raises( + ValueError, + match="coerce argument must be one of " + "'all'|'strict-null'|'null'|'non-null'|None, not abcd", + ): + lib.ensure_string_array(data, coerce="abcd") + + +@pytest.mark.parametrize( + "values", + [ + np.array(["foo", "bar", pd.NA], dtype=object), + np.array(["foo", "bar", np.nan], dtype=object), + np.array(["foo", "bar", None], dtype=object), + np.array(["foo", "bar", float("nan")], dtype=object), + np.array(["foo", "bar", np.float64("nan")], dtype=object), + BaseMaskedArray( + np.array(["foo", "bar", "garbage"]), np.array([False, False, True]) + ), + ], +) +def test_from_sequence_no_coerce(cls, values): + expected = pd.arrays.StringArray(np.array(["foo", "bar", pd.NA], dtype=object)) + result = cls._from_sequence(values, coerce=False) + # Use bare assert since classes are different + assert (result == expected).all() + + +@pytest.mark.parametrize( + "values", + [ + np.array(["foo", "bar", pd.NaT], dtype=object), + np.array(["foo", "bar", np.datetime64("nat")], dtype=object), + np.array(["foo", "bar", Decimal("nan")], dtype=object), + ], +) +def test_from_sequence_no_coerce_invalid(cls, values): + with pytest.raises( + ValueError, + match="Element .* is not a string or valid null." 
+ "If you want it to be coerced to a string," + "specify coerce='all'", + ): + cls._from_sequence(values, coerce=False) @pytest.mark.parametrize("copy", [True, False]) diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 7953d650636be..200a8c599737c 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -1542,11 +1542,18 @@ def test_is_string_array(self): assert lib.is_string_array( np.array(["foo", "bar", pd.NA], dtype=object), skipna=True ) - # NaN is not valid for string array, just NA - assert not lib.is_string_array( + assert lib.is_string_array( np.array(["foo", "bar", np.nan], dtype=object), skipna=True ) - + assert lib.is_string_array( + np.array(["foo", "bar", None], dtype=object), skipna=True + ) + assert not lib.is_string_array( + np.array(["foo", "bar", None], dtype=object), skipna=False + ) + assert not lib.is_string_array( + np.array(["foo", "bar", np.nan], dtype=object), skipna=False + ) assert not lib.is_string_array(np.array([1, 2])) def test_to_object_array_tuples(self):
- [x] closes #40839 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry Looking at OP #30980 this was done to ensure that a double pass is not made over data, but I don't think I'm doing that here. This is a precursor for #40687 Marking as needs discussion since it might be controversial which nan-likes to allow in the constructor.
https://api.github.com/repos/pandas-dev/pandas/pulls/41412
2021-05-10T23:21:07Z
2022-04-24T03:19:17Z
null
2022-05-12T18:46:06Z
Bug in read_csv and read_excel not applying dtype to second col with dup cols
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 5adc8540e6864..d9f8bee3acdec 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -837,6 +837,7 @@ I/O - Bug in :func:`read_excel` raising ``AttributeError`` with ``MultiIndex`` header followed by two empty rows and no index, and bug affecting :func:`read_excel`, :func:`read_csv`, :func:`read_table`, :func:`read_fwf`, and :func:`read_clipboard` where one blank row after a ``MultiIndex`` header with no index would be dropped (:issue:`40442`) - Bug in :meth:`DataFrame.to_string` misplacing the truncation column when ``index=False`` (:issue:`40907`) - Bug in :func:`read_orc` always raising ``AttributeError`` (:issue:`40918`) +- Bug in :func:`read_csv` and :func:`read_excel` not respecting dtype for duplicated column name when ``mangle_dupe_cols`` is set to ``True`` (:issue:`35211`) - Bug in :func:`read_csv` and :func:`read_table` misinterpreting arguments when ``sys.setprofile`` had been previously called (:issue:`41069`) - Bug in the conversion from pyarrow to pandas (e.g. 
for reading Parquet) with nullable dtypes and a pyarrow array whose data buffer size is not a multiple of dtype size (:issue:`40896`) diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 8d9f1773590b0..0878aff562c12 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -685,10 +685,17 @@ cdef class TextReader: count = counts.get(name, 0) if not self.has_mi_columns and self.mangle_dupe_cols: - while count > 0: - counts[name] = count + 1 - name = f'{name}.{count}' - count = counts.get(name, 0) + if count > 0: + while count > 0: + counts[name] = count + 1 + name = f'{name}.{count}' + count = counts.get(name, 0) + if ( + self.dtype is not None + and self.dtype.get(old_name) is not None + and self.dtype.get(name) is None + ): + self.dtype.update({name: self.dtype.get(old_name)}) if old_name == '': unnamed_cols.add(name) diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py index 0055f3123f3c0..1d70b6e59c51b 100644 --- a/pandas/io/parsers/python_parser.py +++ b/pandas/io/parsers/python_parser.py @@ -421,12 +421,20 @@ def _infer_columns(self): counts: DefaultDict = defaultdict(int) for i, col in enumerate(this_columns): + old_col = col cur_count = counts[col] - while cur_count > 0: - counts[col] = cur_count + 1 - col = f"{col}.{cur_count}" - cur_count = counts[col] + if cur_count > 0: + while cur_count > 0: + counts[col] = cur_count + 1 + col = f"{col}.{cur_count}" + cur_count = counts[col] + if ( + self.dtype is not None + and self.dtype.get(old_col) is not None + and self.dtype.get(col) is None + ): + self.dtype.update({col: self.dtype.get(old_col)}) this_columns[i] = col counts[col] = cur_count + 1 diff --git a/pandas/tests/io/data/excel/df_mangle_dup_col_dtypes.ods b/pandas/tests/io/data/excel/df_mangle_dup_col_dtypes.ods new file mode 100644 index 0000000000000..66558c16319fc Binary files /dev/null and b/pandas/tests/io/data/excel/df_mangle_dup_col_dtypes.ods differ diff --git 
a/pandas/tests/io/data/excel/df_mangle_dup_col_dtypes.xls b/pandas/tests/io/data/excel/df_mangle_dup_col_dtypes.xls new file mode 100644 index 0000000000000..472ad75901286 Binary files /dev/null and b/pandas/tests/io/data/excel/df_mangle_dup_col_dtypes.xls differ diff --git a/pandas/tests/io/data/excel/df_mangle_dup_col_dtypes.xlsb b/pandas/tests/io/data/excel/df_mangle_dup_col_dtypes.xlsb new file mode 100755 index 0000000000000..5052102c6655d Binary files /dev/null and b/pandas/tests/io/data/excel/df_mangle_dup_col_dtypes.xlsb differ diff --git a/pandas/tests/io/data/excel/df_mangle_dup_col_dtypes.xlsm b/pandas/tests/io/data/excel/df_mangle_dup_col_dtypes.xlsm new file mode 100644 index 0000000000000..51edc7f94f9d8 Binary files /dev/null and b/pandas/tests/io/data/excel/df_mangle_dup_col_dtypes.xlsm differ diff --git a/pandas/tests/io/data/excel/df_mangle_dup_col_dtypes.xlsx b/pandas/tests/io/data/excel/df_mangle_dup_col_dtypes.xlsx new file mode 100644 index 0000000000000..ec4e49add4233 Binary files /dev/null and b/pandas/tests/io/data/excel/df_mangle_dup_col_dtypes.xlsx differ diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index 632de5f70f64a..a46cb70097bd8 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -555,6 +555,14 @@ def test_reader_dtype_str(self, read_ext, dtype, expected): actual = pd.read_excel(basename + read_ext, dtype=dtype) tm.assert_frame_equal(actual, expected) + @pytest.mark.parametrize("dtypes, exp_value", [({}, "1"), ({"a.1": "int64"}, 1)]) + def test_dtype_mangle_dup_cols(self, read_ext, dtypes, exp_value): + # GH#35211 + basename = "df_mangle_dup_col_dtypes" + result = pd.read_excel(basename + read_ext, dtype={"a": str, **dtypes}) + expected = DataFrame({"a": ["1"], "a.1": [exp_value]}) + tm.assert_frame_equal(result, expected) + def test_reader_spaces(self, read_ext): # see gh-32207 basename = "test_spaces" diff --git 
a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py index e452159189d4a..59fd3de60e0bf 100644 --- a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py +++ b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py @@ -238,3 +238,13 @@ def test_true_values_cast_to_bool(all_parsers): ) expected["a"] = expected["a"].astype("boolean") tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtypes, exp_value", [({}, "1"), ({"a.1": "int64"}, 1)]) +def test_dtype_mangle_dup_cols(all_parsers, dtypes, exp_value): + # GH#35211 + parser = all_parsers + data = """a,a\n1,1""" + result = parser.read_csv(StringIO(data), dtype={"a": str, **dtypes}) + expected = DataFrame({"a": ["1"], "a.1": [exp_value]}) + tm.assert_frame_equal(result, expected)
- [x] closes #35211 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41411
2021-05-10T22:03:52Z
2021-05-12T13:55:50Z
2021-05-12T13:55:50Z
2021-06-15T18:26:58Z
BUG: fix TypeError when looking up a str subclass on a DataFrame with…
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index cf3dd1b0e3226..6b59349ce52b2 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -790,6 +790,7 @@ Indexing - Bug in :meth:`DataFrame.loc.__setitem__` when setting-with-expansion incorrectly raising when the index in the expanding axis contains duplicates (:issue:`40096`) - Bug in :meth:`DataFrame.loc` incorrectly matching non-boolean index elements (:issue:`20432`) - Bug in :meth:`Series.__delitem__` with ``ExtensionDtype`` incorrectly casting to ``ndarray`` (:issue:`40386`) +- Bug in :meth:`DataFrame.__setitem__` raising ``TypeError`` when using a str subclass as the column name with a :class:`DatetimeIndex` (:issue:`37366`) Missing ^^^^^^^ diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 96aeda955df01..2f8919644486b 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -2289,7 +2289,7 @@ def convert_to_index_sliceable(obj: DataFrame, key): # slice here via partial string indexing if idx._supports_partial_string_indexing: try: - res = idx._get_string_slice(key) + res = idx._get_string_slice(str(key)) warnings.warn( "Indexing a DataFrame with a datetimelike index using a single " "string to slice the rows, like `frame[string]`, is deprecated " diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py index 29a037c1d3b52..e46eed05caa86 100644 --- a/pandas/tests/indexing/test_datetime.py +++ b/pandas/tests/indexing/test_datetime.py @@ -152,3 +152,16 @@ def test_getitem_millisecond_resolution(self, frame_or_series): ], ) tm.assert_equal(result, expected) + + def test_str_subclass(self): + # GH 37366 + class mystring(str): + pass + + data = ["2020-10-22 01:21:00+00:00"] + index = pd.DatetimeIndex(data) + df = DataFrame({"a": [1]}, index=index) + df["b"] = 2 + df[mystring("c")] = 3 + expected = DataFrame({"a": [1], "b": [2], mystring("c"): [3]}, index=index) + tm.assert_equal(df, 
expected)
… DatetimeIndex (#37366) - [X] closes #xxxx - [X] tests added / passed - [X] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41406
2021-05-10T04:52:06Z
2021-05-10T14:29:06Z
2021-05-10T14:29:06Z
2021-05-10T14:29:11Z
BUG: Rolling.__iter__ includes on index columns in the result
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 3a67e848024ac..2a0e9f44740bb 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -994,6 +994,7 @@ Groupby/resample/rolling - Bug in :meth:`DataFrameGroupBy.__getitem__` with non-unique columns incorrectly returning a malformed :class:`SeriesGroupBy` instead of :class:`DataFrameGroupBy` (:issue:`41427`) - Bug in :meth:`DataFrameGroupBy.transform` with non-unique columns incorrectly raising ``AttributeError`` (:issue:`41427`) - Bug in :meth:`Resampler.apply` with non-unique columns incorrectly dropping duplicated columns (:issue:`41445`) +- Bug in :meth:`DataFrame.rolling.__iter__` where ``on`` was not assigned to the index of the resulting objects (:issue:`40373`) - Bug in :meth:`DataFrameGroupBy.transform` and :meth:`DataFrameGroupBy.agg` with ``engine="numba"`` where ``*args`` were being cached with the user passed function (:issue:`41647`) Reshaping diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 0ef0896df8d44..dfb74b38cd9cf 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -291,6 +291,7 @@ def __repr__(self) -> str: def __iter__(self): obj = self._create_data(self._selected_obj) + obj = obj.set_axis(self._on) indexer = self._get_window_indexer() start, end = indexer.get_window_bounds( diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index 4846e15da039f..7a3e1e002759d 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -742,7 +742,7 @@ def test_iter_rolling_dataframe(df, expected, window, min_periods): ], ) def test_iter_rolling_on_dataframe(expected, window): - # GH 11704 + # GH 11704, 40373 df = DataFrame( { "A": [1, 2, 3, 4, 5], @@ -751,7 +751,9 @@ def test_iter_rolling_on_dataframe(expected, window): } ) - expected = [DataFrame(values, index=index) for (values, index) in expected] + expected = [ + 
DataFrame(values, index=df.loc[index, "C"]) for (values, index) in expected + ] for (expected, actual) in zip(expected, df.rolling(window, on="C")): tm.assert_frame_equal(actual, expected)
- [x] closes #40373 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41405
2021-05-10T03:49:50Z
2021-05-26T03:27:26Z
2021-05-26T03:27:26Z
2021-05-26T03:27:29Z
REF: set _selection only in __init__
diff --git a/pandas/core/resample.py b/pandas/core/resample.py index aae6314968695..1ab2b90d6564a 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -139,6 +139,8 @@ def __init__( groupby: TimeGrouper, axis: int = 0, kind=None, + *, + selection=None, **kwargs, ): self.groupby = groupby @@ -152,6 +154,7 @@ def __init__( self.groupby._set_grouper(self._convert_obj(obj), sort=True) self.binner, self.grouper = self._get_binner() + self._selection = selection @final def _shallow_copy(self, obj, **kwargs): @@ -1080,13 +1083,16 @@ def _gotitem(self, key, ndim, subset=None): except IndexError: groupby = self._groupby - self = type(self)(subset, groupby=groupby, parent=self, **kwargs) - self._reset_cache() + selection = None if subset.ndim == 2 and ( - lib.is_scalar(key) and key in subset or lib.is_list_like(key) + (lib.is_scalar(key) and key in subset) or lib.is_list_like(key) ): - self._selection = key - return self + selection = key + + new_rs = type(self)( + subset, groupby=groupby, parent=self, selection=selection, **kwargs + ) + return new_rs class DatetimeIndexResampler(Resampler): diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index 08a65964f278e..4187c56079060 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -268,6 +268,8 @@ def __init__( ignore_na: bool = False, axis: Axis = 0, times: str | np.ndarray | FrameOrSeries | None = None, + *, + selection=None, ): super().__init__( obj=obj, @@ -277,6 +279,7 @@ def __init__( closed=None, method="single", axis=axis, + selection=selection, ) self.com = com self.span = span diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py index ac1ebfd4b0825..cddb3ef56250d 100644 --- a/pandas/core/window/expanding.py +++ b/pandas/core/window/expanding.py @@ -102,9 +102,15 @@ def __init__( center=None, axis: Axis = 0, method: str = "single", + selection=None, ): super().__init__( - obj=obj, min_periods=min_periods, center=center, axis=axis, 
method=method + obj=obj, + min_periods=min_periods, + center=center, + axis=axis, + method=method, + selection=selection, ) def _get_window_indexer(self) -> BaseIndexer: diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 490cdca8519e6..5e038635305f1 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -123,6 +123,8 @@ def __init__( on: str | Index | None = None, closed: str | None = None, method: str = "single", + *, + selection=None, ): self.obj = obj self.on = on @@ -150,6 +152,8 @@ def __init__( f"invalid on specified as {self.on}, " "must be a column (of DataFrame), an Index or None" ) + + self._selection = selection self.validate() @property @@ -242,16 +246,22 @@ def _gotitem(self, key, ndim, subset=None): # create a new object to prevent aliasing if subset is None: subset = self.obj - # TODO: Remove once win_type deprecation is enforced + + # we need to make a shallow copy of ourselves + # with the same groupby with warnings.catch_warnings(): + # TODO: Remove once win_type deprecation is enforced warnings.filterwarnings("ignore", "win_type", FutureWarning) - self = type(self)( - subset, **{attr: getattr(self, attr) for attr in self._attributes} - ) - if subset.ndim == 2: - if is_scalar(key) and key in subset or is_list_like(key): - self._selection = key - return self + kwargs = {attr: getattr(self, attr) for attr in self._attributes} + + selection = None + if subset.ndim == 2 and ( + (is_scalar(key) and key in subset) or is_list_like(key) + ): + selection = key + + new_win = type(self)(subset, selection=selection, **kwargs) + return new_win def __getattr__(self, attr: str): if attr in self._internal_names_set:
Trying to make these less stateful.
https://api.github.com/repos/pandas-dev/pandas/pulls/41403
2021-05-09T19:56:50Z
2021-05-17T18:03:56Z
2021-05-17T18:03:56Z
2021-05-17T18:06:09Z
DOC: Adding Cylon under ecosystem/out of core
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index d53d0556dca04..bc2325f15852c 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -405,6 +405,35 @@ Blaze provides a standard API for doing computations with various in-memory and on-disk backends: NumPy, pandas, SQLAlchemy, MongoDB, PyTables, PySpark. +`Cylon <https://cylondata.org/>`__ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Cylon is a fast, scalable, distributed memory parallel runtime with a pandas +like Python DataFrame API. ”Core Cylon” is implemented with C++ using Apache +Arrow format to represent the data in-memory. Cylon DataFrame API implements +most of the core operators of pandas such as merge, filter, join, concat, +group-by, drop_duplicates, etc. These operators are designed to work across +thousands of cores to scale applications. It can interoperate with pandas +DataFrame by reading data from pandas or converting data to pandas so users +can selectively scale parts of their pandas DataFrame applications. + +.. code:: python + + from pycylon import read_csv, DataFrame, CylonEnv + from pycylon.net import MPIConfig + + # Initialize Cylon distributed environment + config: MPIConfig = MPIConfig() + env: CylonEnv = CylonEnv(config=config, distributed=True) + + df1: DataFrame = read_csv('/tmp/csv1.csv') + df2: DataFrame = read_csv('/tmp/csv2.csv') + + # Using 1000s of cores across the cluster to compute the join + df3: Table = df1.join(other=df2, on=[0], algorithm="hash", env=env) + + print(df3) + `Dask <https://dask.readthedocs.io/en/latest/>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them This PR adds a link and a description about Cylon to the ecosystem page. Github: [https://github.com/cylondata/cylon](https://github.com/cylondata/cylon) Web: [https://cylondata.org/](https://cylondata.org/)
https://api.github.com/repos/pandas-dev/pandas/pulls/41402
2021-05-09T16:57:45Z
2021-05-12T14:07:30Z
2021-05-12T14:07:30Z
2021-05-12T14:07:35Z
REF: groupby remove _selection_name
diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 9d3437fe08b24..d0c6a1a841edb 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -322,6 +322,7 @@ def agg_list_like(self) -> FrameOrSeriesUnion: # i.e. obj is Series or DataFrame selected_obj = obj elif obj._selected_obj.ndim == 1: + # For SeriesGroupBy this matches _obj_with_exclusions selected_obj = obj._selected_obj else: selected_obj = obj._obj_with_exclusions diff --git a/pandas/core/base.py b/pandas/core/base.py index e45c4bf514973..ff94c833d5268 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -180,16 +180,6 @@ class SelectionMixin(Generic[FrameOrSeries]): _internal_names = ["_cache", "__setstate__"] _internal_names_set = set(_internal_names) - @property - def _selection_name(self): - """ - Return a name for myself; - - This would ideally be called the 'name' property, - but we cannot conflict with the Series.name property which can be set. - """ - return self._selection - @final @property def _selection_list(self): diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index c5d9144893f48..4fff12d45af7d 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -11,7 +11,6 @@ abc, namedtuple, ) -import copy from functools import partial from textwrap import dedent from typing import ( @@ -169,18 +168,6 @@ class SeriesGroupBy(GroupBy[Series]): def _iterate_slices(self) -> Iterable[Series]: yield self._selected_obj - @property - def _selection_name(self): - """ - since we are a series, we by definition only have - a single name, but may be the result of a selection or - the name of our object - """ - if self._selection is None: - return self.obj.name - else: - return self._selection - _agg_examples_doc = dedent( """ Examples @@ -314,15 +301,9 @@ def _aggregate_multiple_funcs(self, arg) -> DataFrame: results: dict[base.OutputKey, FrameOrSeriesUnion] = {} for idx, (name, func) in enumerate(arg): - obj = self - # reset the cache 
so that we - # only include the named selection - if name in self._selected_obj: - obj = copy.copy(obj) - obj._reset_cache() - obj._selection = name - results[base.OutputKey(label=name, position=idx)] = obj.aggregate(func) + key = base.OutputKey(label=name, position=idx) + results[key] = self.aggregate(func) if any(isinstance(x, DataFrame) for x in results.values()): from pandas import concat @@ -464,7 +445,7 @@ def _wrap_applied_output( # GH #6265 return self.obj._constructor( [], - name=self._selection_name, + name=self.obj.name, index=self.grouper.result_index, dtype=data.dtype, ) @@ -485,14 +466,14 @@ def _get_index() -> Index: # if self.observed is False, # keep all-NaN rows created while re-indexing res_ser = res_df.stack(dropna=self.observed) - res_ser.name = self._selection_name + res_ser.name = self.obj.name return res_ser elif isinstance(values[0], (Series, DataFrame)): return self._concat_objects(keys, values, not_indexed_same=not_indexed_same) else: # GH #6265 #24880 result = self.obj._constructor( - data=values, index=_get_index(), name=self._selection_name + data=values, index=_get_index(), name=self.obj.name ) return self._reindex_output(result) @@ -550,7 +531,7 @@ def _transform_general(self, func: Callable, *args, **kwargs) -> Series: Transform with a callable func`. 
""" assert callable(func) - klass = type(self._selected_obj) + klass = type(self.obj) results = [] for name, group in self: @@ -572,8 +553,10 @@ def _transform_general(self, func: Callable, *args, **kwargs) -> Series: else: result = self.obj._constructor(dtype=np.float64) - result.name = self._selected_obj.name - return result + result.name = self.obj.name + # error: Incompatible return value type (got "Union[DataFrame, Series]", + # expected "Series") + return result # type: ignore[return-value] def _can_use_transform_fast(self, result) -> bool: return True @@ -693,7 +676,7 @@ def nunique(self, dropna: bool = True) -> Series: res, out = np.zeros(len(ri), dtype=out.dtype), res res[ids[idx]] = out - result = self.obj._constructor(res, index=ri, name=self._selection_name) + result = self.obj._constructor(res, index=ri, name=self.obj.name) return self._reindex_output(result, fill_value=0) @doc(Series.describe) @@ -799,7 +782,7 @@ def apply_series_value_counts(): levels = [ping.group_index for ping in self.grouper.groupings] + [ lev # type: ignore[list-item] ] - names = self.grouper.names + [self._selection_name] + names = self.grouper.names + [self.obj.name] if dropna: mask = codes[-1] != -1 @@ -855,7 +838,7 @@ def build_codes(lev_codes: np.ndarray) -> np.ndarray: if is_integer_dtype(out.dtype): out = ensure_int64(out) - return self.obj._constructor(out, index=mi, name=self._selection_name) + return self.obj._constructor(out, index=mi, name=self.obj.name) def count(self) -> Series: """ @@ -876,7 +859,7 @@ def count(self) -> Series: result = self.obj._constructor( out, index=self.grouper.result_index, - name=self._selection_name, + name=self.obj.name, dtype="int64", ) return self._reindex_output(result, fill_value=0) @@ -1048,7 +1031,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) if isinstance(sobj, Series): # GH#35246 test_groupby_as_index_select_column_sum_empty_df - result.columns = [self._selected_obj.name] + result.columns = 
[sobj.name] else: # select everything except for the last level, which is the one # containing the name of the function(s), see GH#32040 @@ -1174,11 +1157,11 @@ def _wrap_applied_output(self, data, keys, values, not_indexed_same=False): # TODO: sure this is right? we used to do this # after raising AttributeError above return self.obj._constructor_sliced( - values, index=key_index, name=self._selection_name + values, index=key_index, name=self._selection ) elif not isinstance(first_not_none, Series): # values are not series or array-like but scalars - # self._selection_name not passed through to Series as the + # self._selection not passed through to Series as the # result should not take the name of original selection # of columns if self.as_index: diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 2510fcaa84f1c..2091d2fc484e1 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1052,9 +1052,10 @@ def reset_identity(values): values = reset_identity(values) result = concat(values, axis=self.axis) - if isinstance(result, Series) and self._selection_name is not None: + name = self.obj.name if self.obj.ndim == 1 else self._selection + if isinstance(result, Series) and name is not None: - result.name = self._selection_name + result.name = name return result
We use self._selected_obj and self._obj_with_exclusions in different places with no clear distinction as to when to use which. I _think_ in pretty much all cases we really want obj_with_exclusions, am moving towards pinning that down. The only non-trivial thing here is getting rid of pinning _selection in [here](https://github.com/pandas-dev/pandas/compare/master...jbrockmendel:cln-selection_name?expand=1#diff-f1ec980d06b0b54c8263f663f767636c3f5921f04f8a7ee91dfc71a2100b05cdL326). We have no tests that reach that. I can cook up a case that gets there, but haven't found any cases where removing it actually changes anything.
https://api.github.com/repos/pandas-dev/pandas/pulls/41401
2021-05-09T16:33:47Z
2021-05-12T01:11:45Z
2021-05-12T01:11:45Z
2021-05-12T01:37:26Z
DOC: Remove multiple blank lines in ipython directives
diff --git a/doc/source/user_guide/basics.rst b/doc/source/user_guide/basics.rst index 8d38c12252df4..70cfa3500f6b4 100644 --- a/doc/source/user_guide/basics.rst +++ b/doc/source/user_guide/basics.rst @@ -1184,11 +1184,9 @@ a single value and returning a single value. For example: df4 - def f(x): return len(str(x)) - df4["one"].map(f) df4.applymap(f) diff --git a/doc/source/user_guide/cookbook.rst b/doc/source/user_guide/cookbook.rst index 94a5f807d2262..e1aae0fd481b1 100644 --- a/doc/source/user_guide/cookbook.rst +++ b/doc/source/user_guide/cookbook.rst @@ -494,15 +494,12 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to S = pd.Series([i / 100.0 for i in range(1, 11)]) - def cum_ret(x, y): return x * (1 + y) - def red(x): return functools.reduce(cum_ret, x, 1.0) - S.expanding().apply(red, raw=True) @@ -514,12 +511,10 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to df = pd.DataFrame({"A": [1, 1, 2, 2], "B": [1, -1, 1, 2]}) gb = df.groupby("A") - def replace(g): mask = g < 0 return g.where(mask, g[~mask].mean()) - gb.transform(replace) `Sort groups by aggregated data @@ -551,13 +546,11 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to rng = pd.date_range(start="2014-10-07", periods=10, freq="2min") ts = pd.Series(data=list(range(10)), index=rng) - def MyCust(x): if len(x) > 2: return x[1] * 1.234 return pd.NaT - mhc = {"Mean": np.mean, "Max": np.max, "Custom": MyCust} ts.resample("5min").apply(mhc) ts @@ -803,11 +796,9 @@ Apply index=["I", "II", "III"], ) - def SeriesFromSubList(aList): return pd.Series(aList) - df_orgz = pd.concat( {ind: row.apply(SeriesFromSubList) for ind, row in df.iterrows()} ) @@ -827,12 +818,10 @@ Rolling Apply to multiple columns where function calculates a Series before a Sc ) df - def gm(df, const): v = ((((df["A"] + df["B"]) + 1).cumprod()) - 1) * const return v.iloc[-1] - s = pd.Series( { df.index[i]: gm(df.iloc[i: min(i + 51, len(df) 
- 1)], 5) @@ -859,11 +848,9 @@ Rolling Apply to multiple columns where function returns a Scalar (Volume Weight ) df - def vwap(bars): return (bars.Close * bars.Volume).sum() / bars.Volume.sum() - window = 5 s = pd.concat( [ diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst index 3f596388ca226..ef6d45fa0140b 100644 --- a/doc/source/user_guide/groupby.rst +++ b/doc/source/user_guide/groupby.rst @@ -1617,12 +1617,10 @@ column index name will be used as the name of the inserted column: } ) - def compute_metrics(x): result = {"b_sum": x["b"].sum(), "c_mean": x["c"].mean()} return pd.Series(result, name="metrics") - result = df.groupby("a").apply(compute_metrics) result diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index 5148bb87b0eb0..7f0cd613726dc 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -4648,11 +4648,9 @@ chunks. store.append("dfeq", dfeq, data_columns=["number"]) - def chunks(l, n): return [l[i: i + n] for i in range(0, len(l), n)] - evens = [2, 4, 6, 8, 10] coordinates = store.select_as_coordinates("dfeq", "number=evens") for c in chunks(coordinates, 2): diff --git a/doc/source/user_guide/merging.rst b/doc/source/user_guide/merging.rst index d8998a9a0a6e1..09b3d3a8c96df 100644 --- a/doc/source/user_guide/merging.rst +++ b/doc/source/user_guide/merging.rst @@ -1578,4 +1578,5 @@ to ``True``. You may also keep all the original values even if they are equal. .. 
ipython:: python + df.compare(df2, keep_shape=True, keep_equal=True) diff --git a/doc/source/user_guide/reshaping.rst b/doc/source/user_guide/reshaping.rst index 77cf43b2e2b19..7d1d03fe020a6 100644 --- a/doc/source/user_guide/reshaping.rst +++ b/doc/source/user_guide/reshaping.rst @@ -18,7 +18,6 @@ Reshaping by pivoting DataFrame objects import pandas._testing as tm - def unpivot(frame): N, K = frame.shape data = { @@ -29,7 +28,6 @@ Reshaping by pivoting DataFrame objects columns = ["date", "variable", "value"] return pd.DataFrame(data, columns=columns) - df = unpivot(tm.makeTimeDataFrame(3)) Data is often stored in so-called "stacked" or "record" format: diff --git a/doc/source/user_guide/sparse.rst b/doc/source/user_guide/sparse.rst index e4eea57c43dbb..982a5b0a70b55 100644 --- a/doc/source/user_guide/sparse.rst +++ b/doc/source/user_guide/sparse.rst @@ -325,7 +325,6 @@ In the example below, we transform the ``Series`` to a sparse representation of row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True ) - A A.todense() rows diff --git a/doc/source/user_guide/text.rst b/doc/source/user_guide/text.rst index 9b1c9b8d04270..db9485f3f2348 100644 --- a/doc/source/user_guide/text.rst +++ b/doc/source/user_guide/text.rst @@ -297,24 +297,19 @@ positional argument (a regex object) and return a string. 
# Reverse every lowercase alphabetic word pat = r"[a-z]+" - def repl(m): return m.group(0)[::-1] - pd.Series(["foo 123", "bar baz", np.nan], dtype="string").str.replace( pat, repl, regex=True ) - # Using regex groups pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)" - def repl(m): return m.group("two").swapcase() - pd.Series(["Foo Bar Baz", np.nan], dtype="string").str.replace( pat, repl, regex=True ) diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst index 01ff62d984544..6f005f912fe37 100644 --- a/doc/source/user_guide/timeseries.rst +++ b/doc/source/user_guide/timeseries.rst @@ -1422,7 +1422,6 @@ An example of how holidays and holiday calendars are defined: MO, ) - class ExampleCalendar(AbstractHolidayCalendar): rules = [ USMemorialDay, @@ -1435,7 +1434,6 @@ An example of how holidays and holiday calendars are defined: ), ] - cal = ExampleCalendar() cal.holidays(datetime.datetime(2012, 1, 1), datetime.datetime(2012, 12, 31)) @@ -1707,13 +1705,11 @@ We can instead only resample those groups where we have points as follows: from functools import partial from pandas.tseries.frequencies import to_offset - def round(t, freq): # round a Timestamp to a specified freq freq = to_offset(freq) return pd.Timestamp((t.value // freq.delta.value) * freq.delta.value) - ts.groupby(partial(round, freq="3T")).sum() .. _timeseries.aggregate: @@ -2255,11 +2251,9 @@ To convert from an ``int64`` based YYYYMMDD representation. s = pd.Series([20121231, 20141130, 99991231]) s - def conv(x): return pd.Period(year=x // 10000, month=x // 100 % 100, day=x % 100, freq="D") - s.apply(conv) s.apply(conv)[2] diff --git a/doc/source/user_guide/window.rst b/doc/source/user_guide/window.rst index b692685b90234..c8687f808a802 100644 --- a/doc/source/user_guide/window.rst +++ b/doc/source/user_guide/window.rst @@ -212,7 +212,6 @@ from present information back to past information. This allows the rolling windo df - .. 
_window.custom_rolling_window: Custom window rolling @@ -294,13 +293,12 @@ conditions. In these cases it can be useful to perform forward-looking rolling w This :func:`BaseIndexer <pandas.api.indexers.BaseIndexer>` subclass implements a closed fixed-width forward-looking rolling window, and we can use it as follows: -.. ipython:: ipython +.. ipython:: python from pandas.api.indexers import FixedForwardWindowIndexer indexer = FixedForwardWindowIndexer(window_size=2) df.rolling(indexer, min_periods=1).sum() - .. _window.rolling_apply: Rolling apply @@ -319,7 +317,6 @@ the windows are cast as :class:`Series` objects (``raw=False``) or ndarray objec s = pd.Series(range(10)) s.rolling(window=4).apply(mad, raw=True) - .. _window.numba_engine: Numba engine
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them Related to #26788 This PR gets rid of all warnings of the form "UserWarning: Code input with no code at..." when building the docs. When there are multiple blank lines, e.g. ``` foo = 5 def bar(): pass ``` one of the blank lines is reported as having no code. That said, I'm not sure if this is the right approach, as the double blank likes are PEP8 compliant. Perhaps this should be reported upstream instead and the multiple blank lines left in the docs. cc @jorisvandenbossche @TomAugspurger
https://api.github.com/repos/pandas-dev/pandas/pulls/41400
2021-05-09T16:20:45Z
2021-05-10T14:37:40Z
2021-05-10T14:37:40Z
2021-05-10T20:05:35Z
WIP: groupby skipna
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 3fa92ce2229c3..2f2715d4b0e5d 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -482,7 +482,8 @@ def group_add(complexfloating_t[:, ::1] out, int64_t[::1] counts, ndarray[complexfloating_t, ndim=2] values, const intp_t[:] labels, - Py_ssize_t min_count=0) -> None: + Py_ssize_t min_count=0, + bint skipna=True) -> None: """ Only aggregates on axis=0 using Kahan summation """ @@ -520,6 +521,13 @@ def group_add(complexfloating_t[:, ::1] out, t = sumx[lab, j] + y compensation[lab, j] = t - sumx[lab, j] - y sumx[lab, j] = t + # dont skip nan + elif skipna == False: + sumx[lab, j] = NAN + break + # skip nan + else: + continue for i in range(ncounts): for j in range(K): @@ -535,7 +543,8 @@ def group_prod(floating[:, ::1] out, int64_t[::1] counts, ndarray[floating, ndim=2] values, const intp_t[:] labels, - Py_ssize_t min_count=0) -> None: + Py_ssize_t min_count=0, + bint skipna=True) -> None: """ Only aggregates on axis=0 """ @@ -568,6 +577,11 @@ def group_prod(floating[:, ::1] out, if val == val: nobs[lab, j] += 1 prodx[lab, j] *= val + elif skipna == False: + prodx[lab, j] = NAN + break + else: + continue for i in range(ncounts): for j in range(K): @@ -585,6 +599,7 @@ def group_var(floating[:, ::1] out, ndarray[floating, ndim=2] values, const intp_t[:] labels, Py_ssize_t min_count=-1, + bint skipna=True, int64_t ddof=1) -> None: cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) @@ -622,6 +637,11 @@ def group_var(floating[:, ::1] out, oldmean = mean[lab, j] mean[lab, j] += (val - oldmean) / nobs[lab, j] out[lab, j] += (val - mean[lab, j]) * (val - oldmean) + elif skipna == False: + out[lab, j] = NAN + break + else: + continue for i in range(ncounts): for j in range(K): diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 9d6d2d698dfe5..d48365a79f201 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -346,9 +346,10 
@@ def _aggregate_multiple_funcs(self, arg): return self.obj._constructor_expanddim(output, columns=columns) def _cython_agg_general( - self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1 + self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1, skipna: bool = True ): output: dict[base.OutputKey, ArrayLike] = {} + # MAYUKH # Ideally we would be able to enumerate self._iterate_slices and use # the index from enumeration as the key of output, but ohlc in particular # returns a (n x 4) array. Output requires 1D ndarrays as values, so we @@ -361,7 +362,7 @@ def _cython_agg_general( continue result = self.grouper._cython_operation( - "aggregate", obj._values, how, axis=0, min_count=min_count + "aggregate", obj._values, how, axis=0, min_count=min_count, skipna=skipna ) assert result.ndim == 1 key = base.OutputKey(label=name, position=idx) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index f579b04db898e..d02327d42dfec 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1269,6 +1269,7 @@ def _agg_general( *, alias: str, npfunc: Callable, + skipna=True, ): with group_selection_context(self): # try a cython aggregation if we can @@ -1279,6 +1280,7 @@ def _agg_general( alt=npfunc, numeric_only=numeric_only, min_count=min_count, + skipna=skipna ) except DataError: pass @@ -1298,7 +1300,7 @@ def _agg_general( return result.__finalize__(self.obj, method="groupby") def _cython_agg_general( - self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1 + self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1, skipna: bool = False ): raise AbstractMethodError(self) @@ -1691,7 +1693,7 @@ def size(self) -> FrameOrSeriesUnion: @final @doc(_groupby_agg_method_template, fname="sum", no=True, mc=0) - def sum(self, numeric_only: bool = True, min_count: int = 0): + def sum(self, numeric_only: bool = True, min_count: int = 0, skipna=True): # If we are 
grouping on categoricals we want unobserved categories to # return zero, rather than the default of NaN which the reindexing in @@ -1702,15 +1704,16 @@ def sum(self, numeric_only: bool = True, min_count: int = 0): min_count=min_count, alias="add", npfunc=np.sum, + skipna=skipna ) return self._reindex_output(result, fill_value=0) @final @doc(_groupby_agg_method_template, fname="prod", no=True, mc=0) - def prod(self, numeric_only: bool = True, min_count: int = 0): + def prod(self, numeric_only: bool = True, min_count: int = 0, skipna: bool = True): return self._agg_general( - numeric_only=numeric_only, min_count=min_count, alias="prod", npfunc=np.prod + numeric_only=numeric_only, min_count=min_count, alias="prod", npfunc=np.prod, skipna=skipna ) @final diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 3ee185d862b01..0b1df3592c597 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -707,12 +707,14 @@ def _cython_operation( how: str, axis: int, min_count: int = -1, + skipna: bool = True, mask: np.ndarray | None = None, **kwargs, ) -> ArrayLike: """ Returns the values of a cython operation. 
""" + #MAYUKH orig_values = values assert kind in ["transform", "aggregate"] @@ -726,6 +728,7 @@ def _cython_operation( dtype = values.dtype is_numeric = is_numeric_dtype(dtype) + #MAYUKH cy_op = WrappedCythonOp(kind=kind, how=how) # can we do this operation with our cython functions @@ -736,11 +739,11 @@ def _cython_operation( if is_extension_array_dtype(dtype): if isinstance(values, BaseMaskedArray) and func_uses_mask: return self._masked_ea_wrap_cython_operation( - cy_op, kind, values, how, axis, min_count, **kwargs + cy_op, kind, values, how, axis, min_count, skipna, **kwargs ) else: return self._ea_wrap_cython_operation( - cy_op, kind, values, how, axis, min_count, **kwargs + cy_op, kind, values, how, axis, min_count, skipna, **kwargs ) elif values.ndim == 1: @@ -752,6 +755,7 @@ def _cython_operation( how=how, axis=1, min_count=min_count, + skipna=skipna, mask=mask, **kwargs, ) @@ -802,7 +806,8 @@ def _cython_operation( is_datetimelike=is_datetimelike, ) else: - func(result, counts, values, comp_ids, min_count) + #MAYUKH + func(result, counts, values, comp_ids, min_count, skipna) elif kind == "transform": # TODO: min_count if func_uses_mask:
- [ ] closes #15675 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry New duplicate of #15772 since I had lost the previous branch. Work in progress.
https://api.github.com/repos/pandas-dev/pandas/pulls/41399
2021-05-09T15:28:41Z
2021-08-17T00:50:36Z
null
2021-09-14T20:13:12Z
DOC: Remove deprecated usage of Index.__xor__
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 84f1245299d53..cc044e52d0be1 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3239,11 +3239,6 @@ def symmetric_difference(self, other, result_name=None, sort=None): >>> idx2 = pd.Index([2, 3, 4, 5]) >>> idx1.symmetric_difference(idx2) Int64Index([1, 5], dtype='int64') - - You can also use the ``^`` operator: - - >>> idx1 ^ idx2 - Int64Index([1, 5], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other)
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them ref #37374 cc @jbrockmendel
https://api.github.com/repos/pandas-dev/pandas/pulls/41398
2021-05-09T15:14:39Z
2021-05-10T23:19:58Z
2021-05-10T23:19:58Z
2021-05-10T23:54:04Z
DOC: specify regex=True in str.replace
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index f8df05a7022d1..9f8c9fa2f0515 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -1231,7 +1231,7 @@ def replace(self, pat, repl, n=-1, case=None, flags=0, regex=None): Regex module flags, e.g. re.IGNORECASE. Cannot be set if `pat` is a compiled regex. regex : bool, default True - Determines if assumes the passed-in pattern is a regular expression: + Determines if the passed-in pattern is a regular expression: - If True, assumes the passed-in pattern is a regular expression. - If False, treats the pattern as a literal string @@ -1287,7 +1287,7 @@ def replace(self, pat, repl, n=-1, case=None, flags=0, regex=None): To get the idea: - >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr) + >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr, regex=True) 0 <re.Match object; span=(0, 1), match='f'>oo 1 <re.Match object; span=(0, 1), match='f'>uz 2 NaN @@ -1296,7 +1296,8 @@ def replace(self, pat, repl, n=-1, case=None, flags=0, regex=None): Reverse every lowercase alphabetic word: >>> repl = lambda m: m.group(0)[::-1] - >>> pd.Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl) + >>> ser = pd.Series(['foo 123', 'bar baz', np.nan]) + >>> ser.str.replace(r'[a-z]+', repl, regex=True) 0 oof 123 1 rab zab 2 NaN @@ -1306,7 +1307,8 @@ def replace(self, pat, repl, n=-1, case=None, flags=0, regex=None): >>> pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)" >>> repl = lambda m: m.group('two').swapcase() - >>> pd.Series(['One Two Three', 'Foo Bar Baz']).str.replace(pat, repl) + >>> ser = pd.Series(['One Two Three', 'Foo Bar Baz']) + >>> ser.str.replace(pat, repl, regex=True) 0 tWO 1 bAR dtype: object @@ -1315,7 +1317,7 @@ def replace(self, pat, repl, n=-1, case=None, flags=0, regex=None): >>> import re >>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE) - >>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar') + >>> 
pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar', regex=True) 0 foo 1 bar 2 NaN
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them ref #36695 When passing a callable repl, str.replace will raise if regex is False. So it seems to me that in the future, when the default is changed, we should be ignoring the regex argument completely (and update the documentation to that effect). This is slightly magical, but I think better than having a default that raises. If this is the correct way forward, then we don't need to warn when repl is callable. cc @dsaxton @jorisvandenbossche
https://api.github.com/repos/pandas-dev/pandas/pulls/41397
2021-05-09T15:00:55Z
2021-05-10T21:57:20Z
2021-05-10T21:57:20Z
2021-05-10T22:12:56Z
[ArrowStringArray] TST: parametrize str.partition tests
diff --git a/pandas/tests/strings/test_split_partition.py b/pandas/tests/strings/test_split_partition.py index 6df8fa805955d..f8804d6dd6266 100644 --- a/pandas/tests/strings/test_split_partition.py +++ b/pandas/tests/strings/test_split_partition.py @@ -335,90 +335,90 @@ def test_split_with_name(): tm.assert_index_equal(res, exp) -def test_partition_series(): +def test_partition_series(any_string_dtype): # https://github.com/pandas-dev/pandas/issues/23558 - values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None]) + s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype) - result = values.str.partition("_", expand=False) - exp = Series( + result = s.str.partition("_", expand=False) + expected = Series( [("a", "_", "b_c"), ("c", "_", "d_e"), np.nan, ("f", "_", "g_h"), None] ) - tm.assert_series_equal(result, exp) + tm.assert_series_equal(result, expected) - result = values.str.rpartition("_", expand=False) - exp = Series( + result = s.str.rpartition("_", expand=False) + expected = Series( [("a_b", "_", "c"), ("c_d", "_", "e"), np.nan, ("f_g", "_", "h"), None] ) - tm.assert_series_equal(result, exp) + tm.assert_series_equal(result, expected) # more than one char - values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h", None]) - result = values.str.partition("__", expand=False) - exp = Series( + s = Series(["a__b__c", "c__d__e", np.nan, "f__g__h", None]) + result = s.str.partition("__", expand=False) + expected = Series( [ ("a", "__", "b__c"), ("c", "__", "d__e"), np.nan, ("f", "__", "g__h"), None, - ] + ], ) - tm.assert_series_equal(result, exp) + tm.assert_series_equal(result, expected) - result = values.str.rpartition("__", expand=False) - exp = Series( + result = s.str.rpartition("__", expand=False) + expected = Series( [ ("a__b", "__", "c"), ("c__d", "__", "e"), np.nan, ("f__g", "__", "h"), None, - ] + ], ) - tm.assert_series_equal(result, exp) + tm.assert_series_equal(result, expected) # None - values = Series(["a b c", "c d e", np.nan, 
"f g h", None]) - result = values.str.partition(expand=False) - exp = Series( + s = Series(["a b c", "c d e", np.nan, "f g h", None], dtype=any_string_dtype) + result = s.str.partition(expand=False) + expected = Series( [("a", " ", "b c"), ("c", " ", "d e"), np.nan, ("f", " ", "g h"), None] ) - tm.assert_series_equal(result, exp) + tm.assert_series_equal(result, expected) - result = values.str.rpartition(expand=False) - exp = Series( + result = s.str.rpartition(expand=False) + expected = Series( [("a b", " ", "c"), ("c d", " ", "e"), np.nan, ("f g", " ", "h"), None] ) - tm.assert_series_equal(result, exp) + tm.assert_series_equal(result, expected) # Not split - values = Series(["abc", "cde", np.nan, "fgh", None]) - result = values.str.partition("_", expand=False) - exp = Series([("abc", "", ""), ("cde", "", ""), np.nan, ("fgh", "", ""), None]) - tm.assert_series_equal(result, exp) + s = Series(["abc", "cde", np.nan, "fgh", None], dtype=any_string_dtype) + result = s.str.partition("_", expand=False) + expected = Series([("abc", "", ""), ("cde", "", ""), np.nan, ("fgh", "", ""), None]) + tm.assert_series_equal(result, expected) - result = values.str.rpartition("_", expand=False) - exp = Series([("", "", "abc"), ("", "", "cde"), np.nan, ("", "", "fgh"), None]) - tm.assert_series_equal(result, exp) + result = s.str.rpartition("_", expand=False) + expected = Series([("", "", "abc"), ("", "", "cde"), np.nan, ("", "", "fgh"), None]) + tm.assert_series_equal(result, expected) # unicode - values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"]) + s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype) - result = values.str.partition("_", expand=False) - exp = Series([("a", "_", "b_c"), ("c", "_", "d_e"), np.nan, ("f", "_", "g_h")]) - tm.assert_series_equal(result, exp) + result = s.str.partition("_", expand=False) + expected = Series([("a", "_", "b_c"), ("c", "_", "d_e"), np.nan, ("f", "_", "g_h")]) + tm.assert_series_equal(result, expected) - result = 
values.str.rpartition("_", expand=False) - exp = Series([("a_b", "_", "c"), ("c_d", "_", "e"), np.nan, ("f_g", "_", "h")]) - tm.assert_series_equal(result, exp) + result = s.str.rpartition("_", expand=False) + expected = Series([("a_b", "_", "c"), ("c_d", "_", "e"), np.nan, ("f_g", "_", "h")]) + tm.assert_series_equal(result, expected) # compare to standard lib - values = Series(["A_B_C", "B_C_D", "E_F_G", "EFGHEF"]) - result = values.str.partition("_", expand=False).tolist() - assert result == [v.partition("_") for v in values] - result = values.str.rpartition("_", expand=False).tolist() - assert result == [v.rpartition("_") for v in values] + s = Series(["A_B_C", "B_C_D", "E_F_G", "EFGHEF"], dtype=any_string_dtype) + result = s.str.partition("_", expand=False).tolist() + assert result == [v.partition("_") for v in s] + result = s.str.rpartition("_", expand=False).tolist() + assert result == [v.rpartition("_") for v in s] def test_partition_index(): @@ -475,88 +475,96 @@ def test_partition_index(): assert result.nlevels == 3 -def test_partition_to_dataframe(): +def test_partition_to_dataframe(any_string_dtype): # https://github.com/pandas-dev/pandas/issues/23558 - values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None]) - result = values.str.partition("_") - exp = DataFrame( + s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype) + result = s.str.partition("_") + expected = DataFrame( { 0: ["a", "c", np.nan, "f", None], 1: ["_", "_", np.nan, "_", None], 2: ["b_c", "d_e", np.nan, "g_h", None], - } + }, + dtype=any_string_dtype, ) - tm.assert_frame_equal(result, exp) + tm.assert_frame_equal(result, expected) - result = values.str.rpartition("_") - exp = DataFrame( + result = s.str.rpartition("_") + expected = DataFrame( { 0: ["a_b", "c_d", np.nan, "f_g", None], 1: ["_", "_", np.nan, "_", None], 2: ["c", "e", np.nan, "h", None], - } + }, + dtype=any_string_dtype, ) - tm.assert_frame_equal(result, exp) + tm.assert_frame_equal(result, 
expected) - values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None]) - result = values.str.partition("_", expand=True) - exp = DataFrame( + s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype) + result = s.str.partition("_", expand=True) + expected = DataFrame( { 0: ["a", "c", np.nan, "f", None], 1: ["_", "_", np.nan, "_", None], 2: ["b_c", "d_e", np.nan, "g_h", None], - } + }, + dtype=any_string_dtype, ) - tm.assert_frame_equal(result, exp) + tm.assert_frame_equal(result, expected) - result = values.str.rpartition("_", expand=True) - exp = DataFrame( + result = s.str.rpartition("_", expand=True) + expected = DataFrame( { 0: ["a_b", "c_d", np.nan, "f_g", None], 1: ["_", "_", np.nan, "_", None], 2: ["c", "e", np.nan, "h", None], - } + }, + dtype=any_string_dtype, ) - tm.assert_frame_equal(result, exp) + tm.assert_frame_equal(result, expected) -def test_partition_with_name(): +def test_partition_with_name(any_string_dtype): # GH 12617 - s = Series(["a,b", "c,d"], name="xxx") - res = s.str.partition(",") - exp = DataFrame({0: ["a", "c"], 1: [",", ","], 2: ["b", "d"]}) - tm.assert_frame_equal(res, exp) + s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype) + result = s.str.partition(",") + expected = DataFrame( + {0: ["a", "c"], 1: [",", ","], 2: ["b", "d"]}, dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) # should preserve name - res = s.str.partition(",", expand=False) - exp = Series([("a", ",", "b"), ("c", ",", "d")], name="xxx") - tm.assert_series_equal(res, exp) + result = s.str.partition(",", expand=False) + expected = Series([("a", ",", "b"), ("c", ",", "d")], name="xxx") + tm.assert_series_equal(result, expected) + +def test_partition_index_with_name(): idx = Index(["a,b", "c,d"], name="xxx") - res = idx.str.partition(",") - exp = MultiIndex.from_tuples([("a", ",", "b"), ("c", ",", "d")]) - assert res.nlevels == 3 - tm.assert_index_equal(res, exp) + result = idx.str.partition(",") + expected = 
MultiIndex.from_tuples([("a", ",", "b"), ("c", ",", "d")]) + assert result.nlevels == 3 + tm.assert_index_equal(result, expected) # should preserve name - res = idx.str.partition(",", expand=False) - exp = Index(np.array([("a", ",", "b"), ("c", ",", "d")]), name="xxx") - assert res.nlevels == 1 - tm.assert_index_equal(res, exp) + result = idx.str.partition(",", expand=False) + expected = Index(np.array([("a", ",", "b"), ("c", ",", "d")]), name="xxx") + assert result.nlevels == 1 + tm.assert_index_equal(result, expected) -def test_partition_sep_kwarg(): +def test_partition_sep_kwarg(any_string_dtype): # GH 22676; depr kwarg "pat" in favor of "sep" - values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"]) + s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype) - expected = values.str.partition(sep="_") - result = values.str.partition("_") + expected = s.str.partition(sep="_") + result = s.str.partition("_") tm.assert_frame_equal(result, expected) - expected = values.str.rpartition(sep="_") - result = values.str.rpartition("_") + expected = s.str.rpartition(sep="_") + result = s.str.rpartition("_") tm.assert_frame_equal(result, expected)
https://api.github.com/repos/pandas-dev/pandas/pulls/41396
2021-05-09T14:48:14Z
2021-05-10T09:42:16Z
2021-05-10T09:42:16Z
2021-05-10T11:09:57Z
DOC: Remove deprecated use of level for agg functions
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 9a57d86d62fdc..368d39ceac78b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9605,15 +9605,6 @@ def count( 3 3 4 3 dtype: int64 - - Counts for one level of a `MultiIndex`: - - >>> df.set_index(["Person", "Single"]).count(level="Person") - Age - Person - John 2 - Lewis 1 - Myla 1 """ axis = self._get_axis_number(axis) if level is not None: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d225ac6e6881b..bf87ed74dde2f 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -11806,21 +11806,7 @@ def _doc_params(cls): Name: legs, dtype: int64 >>> s.{stat_func}() -{default_output} - -{verb} using level names, as well as indices. - ->>> s.{stat_func}(level='blooded') -blooded -warm {level_output_0} -cold {level_output_1} -Name: legs, dtype: int64 - ->>> s.{stat_func}(level=0) -blooded -warm {level_output_0} -cold {level_output_1} -Name: legs, dtype: int64""" +{default_output}""" _sum_examples = _shared_docs["stat_func_example"].format( stat_func="sum", verb="Sum", default_output=14, level_output_0=6, level_output_1=8
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them ref #40869, cc @phofl
https://api.github.com/repos/pandas-dev/pandas/pulls/41394
2021-05-09T14:13:46Z
2021-05-10T23:21:43Z
2021-05-10T23:21:42Z
2021-05-11T02:00:34Z
[ArrowStringArray] TST: parametrize str.extract tests
diff --git a/pandas/tests/strings/test_extract.py b/pandas/tests/strings/test_extract.py index c1564a5c256a1..09a40728500e9 100644 --- a/pandas/tests/strings/test_extract.py +++ b/pandas/tests/strings/test_extract.py @@ -13,30 +13,31 @@ ) -def test_extract_expand_None(): - values = Series(["fooBAD__barBAD", np.nan, "foo"]) +def test_extract_expand_kwarg_wrong_type_raises(any_string_dtype): + # TODO: should this raise TypeError + values = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype) with pytest.raises(ValueError, match="expand must be True or False"): values.str.extract(".*(BAD[_]+).*(BAD)", expand=None) -def test_extract_expand_unspecified(): - values = Series(["fooBAD__barBAD", np.nan, "foo"]) - result_unspecified = values.str.extract(".*(BAD[_]+).*") - assert isinstance(result_unspecified, DataFrame) - result_true = values.str.extract(".*(BAD[_]+).*", expand=True) - tm.assert_frame_equal(result_unspecified, result_true) +def test_extract_expand_kwarg(any_string_dtype): + s = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype) + expected = DataFrame(["BAD__", np.nan, np.nan], dtype=any_string_dtype) + result = s.str.extract(".*(BAD[_]+).*") + tm.assert_frame_equal(result, expected) -def test_extract_expand_False(): - # Contains tests like those in test_match and some others. 
- values = Series(["fooBAD__barBAD", np.nan, "foo"]) - er = [np.nan, np.nan] # empty row + result = s.str.extract(".*(BAD[_]+).*", expand=True) + tm.assert_frame_equal(result, expected) + + expected = DataFrame( + [["BAD__", "BAD"], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype + ) + result = s.str.extract(".*(BAD[_]+).*(BAD)", expand=False) + tm.assert_frame_equal(result, expected) - result = values.str.extract(".*(BAD[_]+).*(BAD)", expand=False) - exp = DataFrame([["BAD__", "BAD"], er, er]) - tm.assert_frame_equal(result, exp) - # mixed +def test_extract_expand_False_mixed_object(): mixed = Series( [ "aBAD_BAD", @@ -51,163 +52,175 @@ def test_extract_expand_False(): ] ) - rs = Series(mixed).str.extract(".*(BAD[_]+).*(BAD)", expand=False) - exp = DataFrame([["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er]) - tm.assert_frame_equal(rs, exp) - - # unicode - values = Series(["fooBAD__barBAD", np.nan, "foo"]) + result = Series(mixed).str.extract(".*(BAD[_]+).*(BAD)", expand=False) + er = [np.nan, np.nan] # empty row + expected = DataFrame([["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er]) + tm.assert_frame_equal(result, expected) - result = values.str.extract(".*(BAD[_]+).*(BAD)", expand=False) - exp = DataFrame([["BAD__", "BAD"], er, er]) - tm.assert_frame_equal(result, exp) +def test_extract_expand_index_raises(): # GH9980 # Index only works with one regex group since # multi-group would expand to a frame idx = Index(["A1", "A2", "A3", "A4", "B5"]) - with pytest.raises(ValueError, match="supported"): + msg = "only one regex group is supported with Index" + with pytest.raises(ValueError, match=msg): idx.str.extract("([AB])([123])", expand=False) - # these should work for both Series and Index - for klass in [Series, Index]: - # no groups - s_or_idx = klass(["A1", "B2", "C3"]) - msg = "pattern contains no capture groups" - with pytest.raises(ValueError, match=msg): - s_or_idx.str.extract("[ABC][123]", expand=False) - - # only 
non-capturing groups - with pytest.raises(ValueError, match=msg): - s_or_idx.str.extract("(?:[AB]).*", expand=False) - - # single group renames series/index properly - s_or_idx = klass(["A1", "A2"]) - result = s_or_idx.str.extract(r"(?P<uno>A)\d", expand=False) - assert result.name == "uno" - - exp = klass(["A", "A"], name="uno") - if klass == Series: - tm.assert_series_equal(result, exp) - else: - tm.assert_index_equal(result, exp) - - s = Series(["A1", "B2", "C3"]) + +def test_extract_expand_no_capture_groups_raises(index_or_series, any_string_dtype): + s_or_idx = index_or_series(["A1", "B2", "C3"], dtype=any_string_dtype) + msg = "pattern contains no capture groups" + + # no groups + with pytest.raises(ValueError, match=msg): + s_or_idx.str.extract("[ABC][123]", expand=False) + + # only non-capturing groups + with pytest.raises(ValueError, match=msg): + s_or_idx.str.extract("(?:[AB]).*", expand=False) + + +def test_extract_expand_single_capture_group(index_or_series, any_string_dtype): + # single group renames series/index properly + s_or_idx = index_or_series(["A1", "A2"], dtype=any_string_dtype) + result = s_or_idx.str.extract(r"(?P<uno>A)\d", expand=False) + + expected = index_or_series(["A", "A"], name="uno", dtype=any_string_dtype) + if index_or_series == Series: + tm.assert_series_equal(result, expected) + else: + tm.assert_index_equal(result, expected) + + +def test_extract_expand_capture_groups(any_string_dtype): + s = Series(["A1", "B2", "C3"], dtype=any_string_dtype) # one group, no matches result = s.str.extract("(_)", expand=False) - exp = Series([np.nan, np.nan, np.nan], dtype=object) - tm.assert_series_equal(result, exp) + expected = Series([np.nan, np.nan, np.nan], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) # two groups, no matches result = s.str.extract("(_)(_)", expand=False) - exp = DataFrame( - [[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], dtype=object + expected = DataFrame( + [[np.nan, np.nan], [np.nan, 
np.nan], [np.nan, np.nan]], dtype=any_string_dtype ) - tm.assert_frame_equal(result, exp) + tm.assert_frame_equal(result, expected) # one group, some matches result = s.str.extract("([AB])[123]", expand=False) - exp = Series(["A", "B", np.nan]) - tm.assert_series_equal(result, exp) + expected = Series(["A", "B", np.nan], dtype=any_string_dtype) + tm.assert_series_equal(result, expected) # two groups, some matches result = s.str.extract("([AB])([123])", expand=False) - exp = DataFrame([["A", "1"], ["B", "2"], [np.nan, np.nan]]) - tm.assert_frame_equal(result, exp) + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) # one named group result = s.str.extract("(?P<letter>[AB])", expand=False) - exp = Series(["A", "B", np.nan], name="letter") - tm.assert_series_equal(result, exp) + expected = Series(["A", "B", np.nan], name="letter", dtype=any_string_dtype) + tm.assert_series_equal(result, expected) # two named groups result = s.str.extract("(?P<letter>[AB])(?P<number>[123])", expand=False) - exp = DataFrame( - [["A", "1"], ["B", "2"], [np.nan, np.nan]], columns=["letter", "number"] + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, np.nan]], + columns=["letter", "number"], + dtype=any_string_dtype, ) - tm.assert_frame_equal(result, exp) + tm.assert_frame_equal(result, expected) # mix named and unnamed groups result = s.str.extract("([AB])(?P<number>[123])", expand=False) - exp = DataFrame([["A", "1"], ["B", "2"], [np.nan, np.nan]], columns=[0, "number"]) - tm.assert_frame_equal(result, exp) + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, np.nan]], + columns=[0, "number"], + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) # one normal group, one non-capturing group result = s.str.extract("([AB])(?:[123])", expand=False) - exp = Series(["A", "B", np.nan]) - tm.assert_series_equal(result, exp) + expected = Series(["A", "B", np.nan], 
dtype=any_string_dtype) + tm.assert_series_equal(result, expected) # two normal groups, one non-capturing group - result = Series(["A11", "B22", "C33"]).str.extract( - "([AB])([123])(?:[123])", expand=False + s = Series(["A11", "B22", "C33"], dtype=any_string_dtype) + result = s.str.extract("([AB])([123])(?:[123])", expand=False) + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype ) - exp = DataFrame([["A", "1"], ["B", "2"], [np.nan, np.nan]]) - tm.assert_frame_equal(result, exp) + tm.assert_frame_equal(result, expected) # one optional group followed by one normal group - result = Series(["A1", "B2", "3"]).str.extract( - "(?P<letter>[AB])?(?P<number>[123])", expand=False - ) - exp = DataFrame( - [["A", "1"], ["B", "2"], [np.nan, "3"]], columns=["letter", "number"] + s = Series(["A1", "B2", "3"], dtype=any_string_dtype) + result = s.str.extract("(?P<letter>[AB])?(?P<number>[123])", expand=False) + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, "3"]], + columns=["letter", "number"], + dtype=any_string_dtype, ) - tm.assert_frame_equal(result, exp) + tm.assert_frame_equal(result, expected) # one normal group followed by one optional group - result = Series(["A1", "B2", "C"]).str.extract( - "(?P<letter>[ABC])(?P<number>[123])?", expand=False - ) - exp = DataFrame( - [["A", "1"], ["B", "2"], ["C", np.nan]], columns=["letter", "number"] + s = Series(["A1", "B2", "C"], dtype=any_string_dtype) + result = s.str.extract("(?P<letter>[ABC])(?P<number>[123])?", expand=False) + expected = DataFrame( + [["A", "1"], ["B", "2"], ["C", np.nan]], + columns=["letter", "number"], + dtype=any_string_dtype, ) - tm.assert_frame_equal(result, exp) + tm.assert_frame_equal(result, expected) - # GH6348 + +def test_extract_expand_capture_groups_index(index, any_string_dtype): + # https://github.com/pandas-dev/pandas/issues/6348 # not passing index to the extractor - def check_index(index): - data = ["A1", "B2", "C"] - index = index[: 
len(data)] - s = Series(data, index=index) - result = s.str.extract(r"(\d)", expand=False) - exp = Series(["1", "2", np.nan], index=index) - tm.assert_series_equal(result, exp) - - result = Series(data, index=index).str.extract( - r"(?P<letter>\D)(?P<number>\d)?", expand=False - ) - e_list = [["A", "1"], ["B", "2"], ["C", np.nan]] - exp = DataFrame(e_list, columns=["letter", "number"], index=index) - tm.assert_frame_equal(result, exp) - - i_funs = [ - tm.makeStringIndex, - tm.makeUnicodeIndex, - tm.makeIntIndex, - tm.makeDateIndex, - tm.makePeriodIndex, - tm.makeRangeIndex, - ] - for index in i_funs: - check_index(index()) + data = ["A1", "B2", "C"] + + if len(index) < len(data): + pytest.skip("Index too short") + + index = index[: len(data)] + s = Series(data, index=index, dtype=any_string_dtype) + + result = s.str.extract(r"(\d)", expand=False) + expected = Series(["1", "2", np.nan], index=index, dtype=any_string_dtype) + tm.assert_series_equal(result, expected) + + result = s.str.extract(r"(?P<letter>\D)(?P<number>\d)?", expand=False) + expected = DataFrame( + [["A", "1"], ["B", "2"], ["C", np.nan]], + columns=["letter", "number"], + index=index, + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) + - # single_series_name_is_preserved. - s = Series(["a3", "b3", "c2"], name="bob") - r = s.str.extract(r"(?P<sue>[a-z])", expand=False) - e = Series(["a", "b", "c"], name="sue") - tm.assert_series_equal(r, e) - assert r.name == e.name +def test_extract_single_series_name_is_preserved(any_string_dtype): + s = Series(["a3", "b3", "c2"], name="bob", dtype=any_string_dtype) + result = s.str.extract(r"(?P<sue>[a-z])", expand=False) + expected = Series(["a", "b", "c"], name="sue", dtype=any_string_dtype) + tm.assert_series_equal(result, expected) -def test_extract_expand_True(): +def test_extract_expand_True(any_string_dtype): # Contains tests like those in test_match and some others. 
- values = Series(["fooBAD__barBAD", np.nan, "foo"]) - er = [np.nan, np.nan] # empty row + s = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype) + + result = s.str.extract(".*(BAD[_]+).*(BAD)", expand=True) + expected = DataFrame( + [["BAD__", "BAD"], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) - result = values.str.extract(".*(BAD[_]+).*(BAD)", expand=True) - exp = DataFrame([["BAD__", "BAD"], er, er]) - tm.assert_frame_equal(result, exp) - # mixed +def test_extract_expand_True_mixed_object(): + er = [np.nan, np.nan] # empty row mixed = Series( [ "aBAD_BAD", @@ -222,140 +235,158 @@ def test_extract_expand_True(): ] ) - rs = Series(mixed).str.extract(".*(BAD[_]+).*(BAD)", expand=True) - exp = DataFrame([["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er]) - tm.assert_frame_equal(rs, exp) + result = mixed.str.extract(".*(BAD[_]+).*(BAD)", expand=True) + expected = DataFrame([["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er]) + tm.assert_frame_equal(result, expected) + +def test_extract_expand_True_single_capture_group_raises( + index_or_series, any_string_dtype +): # these should work for both Series and Index - for klass in [Series, Index]: - # no groups - s_or_idx = klass(["A1", "B2", "C3"]) - msg = "pattern contains no capture groups" - with pytest.raises(ValueError, match=msg): - s_or_idx.str.extract("[ABC][123]", expand=True) - - # only non-capturing groups - with pytest.raises(ValueError, match=msg): - s_or_idx.str.extract("(?:[AB]).*", expand=True) - - # single group renames series/index properly - s_or_idx = klass(["A1", "A2"]) - result_df = s_or_idx.str.extract(r"(?P<uno>A)\d", expand=True) - assert isinstance(result_df, DataFrame) - result_series = result_df["uno"] - tm.assert_series_equal(result_series, Series(["A", "A"], name="uno")) - - -def test_extract_series(): - # extract should give the same result whether or not the - # series has a name. 
- for series_name in None, "series_name": - s = Series(["A1", "B2", "C3"], name=series_name) - # one group, no matches - result = s.str.extract("(_)", expand=True) - exp = DataFrame([np.nan, np.nan, np.nan], dtype=object) - tm.assert_frame_equal(result, exp) - - # two groups, no matches - result = s.str.extract("(_)(_)", expand=True) - exp = DataFrame( - [[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], dtype=object - ) - tm.assert_frame_equal(result, exp) - - # one group, some matches - result = s.str.extract("([AB])[123]", expand=True) - exp = DataFrame(["A", "B", np.nan]) - tm.assert_frame_equal(result, exp) - - # two groups, some matches - result = s.str.extract("([AB])([123])", expand=True) - exp = DataFrame([["A", "1"], ["B", "2"], [np.nan, np.nan]]) - tm.assert_frame_equal(result, exp) - - # one named group - result = s.str.extract("(?P<letter>[AB])", expand=True) - exp = DataFrame({"letter": ["A", "B", np.nan]}) - tm.assert_frame_equal(result, exp) - - # two named groups - result = s.str.extract("(?P<letter>[AB])(?P<number>[123])", expand=True) - e_list = [["A", "1"], ["B", "2"], [np.nan, np.nan]] - exp = DataFrame(e_list, columns=["letter", "number"]) - tm.assert_frame_equal(result, exp) - - # mix named and unnamed groups - result = s.str.extract("([AB])(?P<number>[123])", expand=True) - exp = DataFrame(e_list, columns=[0, "number"]) - tm.assert_frame_equal(result, exp) - - # one normal group, one non-capturing group - result = s.str.extract("([AB])(?:[123])", expand=True) - exp = DataFrame(["A", "B", np.nan]) - tm.assert_frame_equal(result, exp) - - -def test_extract_optional_groups(): + # no groups + s_or_idx = index_or_series(["A1", "B2", "C3"], dtype=any_string_dtype) + msg = "pattern contains no capture groups" + with pytest.raises(ValueError, match=msg): + s_or_idx.str.extract("[ABC][123]", expand=True) + + # only non-capturing groups + with pytest.raises(ValueError, match=msg): + s_or_idx.str.extract("(?:[AB]).*", expand=True) + + +def 
test_extract_expand_True_single_capture_group(index_or_series, any_string_dtype): + # single group renames series/index properly + s_or_idx = index_or_series(["A1", "A2"], dtype=any_string_dtype) + result = s_or_idx.str.extract(r"(?P<uno>A)\d", expand=True) + expected_dtype = "object" if index_or_series is Index else any_string_dtype + expected = DataFrame({"uno": ["A", "A"]}, dtype=expected_dtype) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("name", [None, "series_name"]) +def test_extract_series(name, any_string_dtype): + # extract should give the same result whether or not the series has a name. + s = Series(["A1", "B2", "C3"], name=name, dtype=any_string_dtype) + + # one group, no matches + result = s.str.extract("(_)", expand=True) + expected = DataFrame([np.nan, np.nan, np.nan], dtype=any_string_dtype) + tm.assert_frame_equal(result, expected) + + # two groups, no matches + result = s.str.extract("(_)(_)", expand=True) + expected = DataFrame( + [[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) + + # one group, some matches + result = s.str.extract("([AB])[123]", expand=True) + expected = DataFrame(["A", "B", np.nan], dtype=any_string_dtype) + tm.assert_frame_equal(result, expected) + + # two groups, some matches + result = s.str.extract("([AB])([123])", expand=True) + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) + + # one named group + result = s.str.extract("(?P<letter>[AB])", expand=True) + expected = DataFrame({"letter": ["A", "B", np.nan]}, dtype=any_string_dtype) + tm.assert_frame_equal(result, expected) + + # two named groups + result = s.str.extract("(?P<letter>[AB])(?P<number>[123])", expand=True) + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, np.nan]], + columns=["letter", "number"], + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, 
expected) + + # mix named and unnamed groups + result = s.str.extract("([AB])(?P<number>[123])", expand=True) + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, np.nan]], + columns=[0, "number"], + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) + + # one normal group, one non-capturing group + result = s.str.extract("([AB])(?:[123])", expand=True) + expected = DataFrame(["A", "B", np.nan], dtype=any_string_dtype) + tm.assert_frame_equal(result, expected) + + +def test_extract_optional_groups(any_string_dtype): # two normal groups, one non-capturing group - result = Series(["A11", "B22", "C33"]).str.extract( - "([AB])([123])(?:[123])", expand=True + s = Series(["A11", "B22", "C33"], dtype=any_string_dtype) + result = s.str.extract("([AB])([123])(?:[123])", expand=True) + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype ) - exp = DataFrame([["A", "1"], ["B", "2"], [np.nan, np.nan]]) - tm.assert_frame_equal(result, exp) + tm.assert_frame_equal(result, expected) # one optional group followed by one normal group - result = Series(["A1", "B2", "3"]).str.extract( - "(?P<letter>[AB])?(?P<number>[123])", expand=True + s = Series(["A1", "B2", "3"], dtype=any_string_dtype) + result = s.str.extract("(?P<letter>[AB])?(?P<number>[123])", expand=True) + expected = DataFrame( + [["A", "1"], ["B", "2"], [np.nan, "3"]], + columns=["letter", "number"], + dtype=any_string_dtype, ) - e_list = [["A", "1"], ["B", "2"], [np.nan, "3"]] - exp = DataFrame(e_list, columns=["letter", "number"]) - tm.assert_frame_equal(result, exp) + tm.assert_frame_equal(result, expected) # one normal group followed by one optional group - result = Series(["A1", "B2", "C"]).str.extract( - "(?P<letter>[ABC])(?P<number>[123])?", expand=True + s = Series(["A1", "B2", "C"], dtype=any_string_dtype) + result = s.str.extract("(?P<letter>[ABC])(?P<number>[123])?", expand=True) + expected = DataFrame( + [["A", "1"], ["B", "2"], ["C", np.nan]], + 
columns=["letter", "number"], + dtype=any_string_dtype, ) - e_list = [["A", "1"], ["B", "2"], ["C", np.nan]] - exp = DataFrame(e_list, columns=["letter", "number"]) - tm.assert_frame_equal(result, exp) + tm.assert_frame_equal(result, expected) + +def test_extract_dataframe_capture_groups_index(index, any_string_dtype): # GH6348 # not passing index to the extractor - def check_index(index): - data = ["A1", "B2", "C"] - index = index[: len(data)] - result = Series(data, index=index).str.extract(r"(\d)", expand=True) - exp = DataFrame(["1", "2", np.nan], index=index) - tm.assert_frame_equal(result, exp) - - result = Series(data, index=index).str.extract( - r"(?P<letter>\D)(?P<number>\d)?", expand=True - ) - e_list = [["A", "1"], ["B", "2"], ["C", np.nan]] - exp = DataFrame(e_list, columns=["letter", "number"], index=index) - tm.assert_frame_equal(result, exp) - - i_funs = [ - tm.makeStringIndex, - tm.makeUnicodeIndex, - tm.makeIntIndex, - tm.makeDateIndex, - tm.makePeriodIndex, - tm.makeRangeIndex, - ] - for index in i_funs: - check_index(index()) + + data = ["A1", "B2", "C"] + + if len(index) < len(data): + pytest.skip("Index too short") + + index = index[: len(data)] + s = Series(data, index=index, dtype=any_string_dtype) + + result = s.str.extract(r"(\d)", expand=True) + expected = DataFrame(["1", "2", np.nan], index=index, dtype=any_string_dtype) + tm.assert_frame_equal(result, expected) + + result = s.str.extract(r"(?P<letter>\D)(?P<number>\d)?", expand=True) + expected = DataFrame( + [["A", "1"], ["B", "2"], ["C", np.nan]], + columns=["letter", "number"], + index=index, + dtype=any_string_dtype, + ) + tm.assert_frame_equal(result, expected) -def test_extract_single_group_returns_frame(): +def test_extract_single_group_returns_frame(any_string_dtype): # GH11386 extract should always return DataFrame, even when # there is only one group. Prior to v0.18.0, extract returned # Series when there was only one group in the regex. 
- s = Series(["a3", "b3", "c2"], name="series_name") - r = s.str.extract(r"(?P<letter>[a-z])", expand=True) - e = DataFrame({"letter": ["a", "b", "c"]}) - tm.assert_frame_equal(r, e) + s = Series(["a3", "b3", "c2"], name="series_name", dtype=any_string_dtype) + result = s.str.extract(r"(?P<letter>[a-z])", expand=True) + expected = DataFrame({"letter": ["a", "b", "c"]}, dtype=any_string_dtype) + tm.assert_frame_equal(result, expected) def test_extractall():
https://api.github.com/repos/pandas-dev/pandas/pulls/41393
2021-05-09T13:54:19Z
2021-05-12T01:12:50Z
2021-05-12T01:12:50Z
2021-05-12T08:03:31Z
[ArrowStringArray] TST: parametrize str.split tests
diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py index 45a9053954569..79ea2a4fba284 100644 --- a/asv_bench/benchmarks/strings.py +++ b/asv_bench/benchmarks/strings.py @@ -230,16 +230,21 @@ def time_contains(self, dtype, regex): class Split: - params = [True, False] - param_names = ["expand"] + params = (["str", "string", "arrow_string"], [True, False]) + param_names = ["dtype", "expand"] + + def setup(self, dtype, expand): + from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401 - def setup(self, expand): - self.s = Series(tm.makeStringIndex(10 ** 5)).str.join("--") + try: + self.s = Series(tm.makeStringIndex(10 ** 5), dtype=dtype).str.join("--") + except ImportError: + raise NotImplementedError - def time_split(self, expand): + def time_split(self, dtype, expand): self.s.str.split("--", expand=expand) - def time_rsplit(self, expand): + def time_rsplit(self, dtype, expand): self.s.str.rsplit("--", expand=expand) diff --git a/pandas/tests/strings/test_split_partition.py b/pandas/tests/strings/test_split_partition.py index f8804d6dd6266..e59105eccc67c 100644 --- a/pandas/tests/strings/test_split_partition.py +++ b/pandas/tests/strings/test_split_partition.py @@ -13,22 +13,29 @@ ) -def test_split(): - values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"]) +def test_split(any_string_dtype): + values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype) result = values.str.split("_") exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]]) tm.assert_series_equal(result, exp) # more than one char - values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"]) + values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype) result = values.str.split("__") tm.assert_series_equal(result, exp) result = values.str.split("__", expand=False) tm.assert_series_equal(result, exp) - # mixed + # regex split + values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype) + 
result = values.str.split("[,_]") + exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]]) + tm.assert_series_equal(result, exp) + + +def test_split_object_mixed(): mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0]) result = mixed.str.split("_") exp = Series( @@ -50,17 +57,10 @@ def test_split(): assert isinstance(result, Series) tm.assert_almost_equal(result, exp) - # regex split - values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"]) - result = values.str.split("[,_]") - exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]]) - tm.assert_series_equal(result, exp) - -@pytest.mark.parametrize("dtype", [object, "string"]) @pytest.mark.parametrize("method", ["split", "rsplit"]) -def test_split_n(dtype, method): - s = Series(["a b", pd.NA, "b c"], dtype=dtype) +def test_split_n(any_string_dtype, method): + s = Series(["a b", pd.NA, "b c"], dtype=any_string_dtype) expected = Series([["a", "b"], pd.NA, ["b", "c"]]) result = getattr(s.str, method)(" ", n=None) @@ -70,20 +70,34 @@ def test_split_n(dtype, method): tm.assert_series_equal(result, expected) -def test_rsplit(): - values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"]) +def test_rsplit(any_string_dtype): + values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype) result = values.str.rsplit("_") exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]]) tm.assert_series_equal(result, exp) # more than one char - values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"]) + values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype) result = values.str.rsplit("__") tm.assert_series_equal(result, exp) result = values.str.rsplit("__", expand=False) tm.assert_series_equal(result, exp) + # regex split is not supported by rsplit + values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype) + result = values.str.rsplit("[,_]") + exp = Series([["a,b_c"], ["c_d,e"], np.nan, ["f,g,h"]]) + 
tm.assert_series_equal(result, exp) + + # setting max number of splits, make sure it's from reverse + values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype) + result = values.str.rsplit("_", n=1) + exp = Series([["a_b", "c"], ["c_d", "e"], np.nan, ["f_g", "h"]]) + tm.assert_series_equal(result, exp) + + +def test_rsplit_object_mixed(): # mixed mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0]) result = mixed.str.rsplit("_") @@ -106,27 +120,15 @@ def test_rsplit(): assert isinstance(result, Series) tm.assert_almost_equal(result, exp) - # regex split is not supported by rsplit - values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"]) - result = values.str.rsplit("[,_]") - exp = Series([["a,b_c"], ["c_d,e"], np.nan, ["f,g,h"]]) - tm.assert_series_equal(result, exp) - # setting max number of splits, make sure it's from reverse - values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"]) - result = values.str.rsplit("_", n=1) - exp = Series([["a_b", "c"], ["c_d", "e"], np.nan, ["f_g", "h"]]) - tm.assert_series_equal(result, exp) - - -def test_split_blank_string(): +def test_split_blank_string(any_string_dtype): # expand blank split GH 20067 - values = Series([""], name="test") + values = Series([""], name="test", dtype=any_string_dtype) result = values.str.split(expand=True) - exp = DataFrame([[]]) # NOTE: this is NOT an empty DataFrame + exp = DataFrame([[]], dtype=any_string_dtype) # NOTE: this is NOT an empty df tm.assert_frame_equal(result, exp) - values = Series(["a b c", "a b", "", " "], name="test") + values = Series(["a b c", "a b", "", " "], name="test", dtype=any_string_dtype) result = values.str.split(expand=True) exp = DataFrame( [ @@ -134,14 +136,15 @@ def test_split_blank_string(): ["a", "b", np.nan], [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan], - ] + ], + dtype=any_string_dtype, ) tm.assert_frame_equal(result, exp) -def test_split_noargs(): +def test_split_noargs(any_string_dtype): # #1859 - s = 
Series(["Wes McKinney", "Travis Oliphant"]) + s = Series(["Wes McKinney", "Travis Oliphant"], dtype=any_string_dtype) result = s.str.split() expected = ["Travis", "Oliphant"] assert result[1] == expected @@ -149,44 +152,64 @@ def test_split_noargs(): assert result[1] == expected -def test_split_maxsplit(): +@pytest.mark.parametrize( + "data, pat", + [ + (["bd asdf jfg", "kjasdflqw asdfnfk"], None), + (["bd asdf jfg", "kjasdflqw asdfnfk"], "asdf"), + (["bd_asdf_jfg", "kjasdflqw_asdfnfk"], "_"), + ], +) +def test_split_maxsplit(data, pat, any_string_dtype): # re.split 0, str.split -1 - s = Series(["bd asdf jfg", "kjasdflqw asdfnfk"]) - - result = s.str.split(n=-1) - xp = s.str.split() - tm.assert_series_equal(result, xp) + s = Series(data, dtype=any_string_dtype) - result = s.str.split(n=0) + result = s.str.split(pat=pat, n=-1) + xp = s.str.split(pat=pat) tm.assert_series_equal(result, xp) - xp = s.str.split("asdf") - result = s.str.split("asdf", n=0) + result = s.str.split(pat=pat, n=0) tm.assert_series_equal(result, xp) - result = s.str.split("asdf", n=-1) - tm.assert_series_equal(result, xp) - -def test_split_no_pat_with_nonzero_n(): - s = Series(["split once", "split once too!"]) - result = s.str.split(n=1) - expected = Series({0: ["split", "once"], 1: ["split", "once too!"]}) +@pytest.mark.parametrize( + "data, pat, expected", + [ + ( + ["split once", "split once too!"], + None, + Series({0: ["split", "once"], 1: ["split", "once too!"]}), + ), + ( + ["split_once", "split_once_too!"], + "_", + Series({0: ["split", "once"], 1: ["split", "once_too!"]}), + ), + ], +) +def test_split_no_pat_with_nonzero_n(data, pat, expected, any_string_dtype): + s = Series(data, dtype=any_string_dtype) + result = s.str.split(pat=pat, n=1) tm.assert_series_equal(expected, result, check_index_type=False) -def test_split_to_dataframe(): - s = Series(["nosplit", "alsonosplit"]) +def test_split_to_dataframe(any_string_dtype): + s = Series(["nosplit", "alsonosplit"], 
dtype=any_string_dtype) result = s.str.split("_", expand=True) - exp = DataFrame({0: Series(["nosplit", "alsonosplit"])}) + exp = DataFrame({0: Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)}) tm.assert_frame_equal(result, exp) - s = Series(["some_equal_splits", "with_no_nans"]) + s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype) result = s.str.split("_", expand=True) - exp = DataFrame({0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]}) + exp = DataFrame( + {0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]}, + dtype=any_string_dtype, + ) tm.assert_frame_equal(result, exp) - s = Series(["some_unequal_splits", "one_of_these_things_is_not"]) + s = Series( + ["some_unequal_splits", "one_of_these_things_is_not"], dtype=any_string_dtype + ) result = s.str.split("_", expand=True) exp = DataFrame( { @@ -196,14 +219,19 @@ def test_split_to_dataframe(): 3: [np.nan, "things"], 4: [np.nan, "is"], 5: [np.nan, "not"], - } + }, + dtype=any_string_dtype, ) tm.assert_frame_equal(result, exp) - s = Series(["some_splits", "with_index"], index=["preserve", "me"]) + s = Series( + ["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype + ) result = s.str.split("_", expand=True) exp = DataFrame( - {0: ["some", "with"], 1: ["splits", "index"]}, index=["preserve", "me"] + {0: ["some", "with"], 1: ["splits", "index"]}, + index=["preserve", "me"], + dtype=any_string_dtype, ) tm.assert_frame_equal(result, exp) @@ -250,29 +278,41 @@ def test_split_to_multiindex_expand(): idx.str.split("_", expand="not_a_boolean") -def test_rsplit_to_dataframe_expand(): - s = Series(["nosplit", "alsonosplit"]) +def test_rsplit_to_dataframe_expand(any_string_dtype): + s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype) result = s.str.rsplit("_", expand=True) - exp = DataFrame({0: Series(["nosplit", "alsonosplit"])}) + exp = DataFrame({0: Series(["nosplit", "alsonosplit"])}, dtype=any_string_dtype) 
tm.assert_frame_equal(result, exp) - s = Series(["some_equal_splits", "with_no_nans"]) + s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype) result = s.str.rsplit("_", expand=True) - exp = DataFrame({0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]}) + exp = DataFrame( + {0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]}, + dtype=any_string_dtype, + ) tm.assert_frame_equal(result, exp) result = s.str.rsplit("_", expand=True, n=2) - exp = DataFrame({0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]}) + exp = DataFrame( + {0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]}, + dtype=any_string_dtype, + ) tm.assert_frame_equal(result, exp) result = s.str.rsplit("_", expand=True, n=1) - exp = DataFrame({0: ["some_equal", "with_no"], 1: ["splits", "nans"]}) + exp = DataFrame( + {0: ["some_equal", "with_no"], 1: ["splits", "nans"]}, dtype=any_string_dtype + ) tm.assert_frame_equal(result, exp) - s = Series(["some_splits", "with_index"], index=["preserve", "me"]) + s = Series( + ["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype + ) result = s.str.rsplit("_", expand=True) exp = DataFrame( - {0: ["some", "with"], 1: ["splits", "index"]}, index=["preserve", "me"] + {0: ["some", "with"], 1: ["splits", "index"]}, + index=["preserve", "me"], + dtype=any_string_dtype, ) tm.assert_frame_equal(result, exp) @@ -297,30 +337,35 @@ def test_rsplit_to_multiindex_expand(): assert result.nlevels == 2 -def test_split_nan_expand(): +def test_split_nan_expand(any_string_dtype): # gh-18450 - s = Series(["foo,bar,baz", np.nan]) + s = Series(["foo,bar,baz", np.nan], dtype=any_string_dtype) result = s.str.split(",", expand=True) - exp = DataFrame([["foo", "bar", "baz"], [np.nan, np.nan, np.nan]]) + exp = DataFrame( + [["foo", "bar", "baz"], [np.nan, np.nan, np.nan]], dtype=any_string_dtype + ) tm.assert_frame_equal(result, exp) - # check that these are actually np.nan and not None + # check 
that these are actually np.nan/pd.NA and not None # TODO see GH 18463 # tm.assert_frame_equal does not differentiate - assert all(np.isnan(x) for x in result.iloc[1]) + if any_string_dtype == "object": + assert all(np.isnan(x) for x in result.iloc[1]) + else: + assert all(x is pd.NA for x in result.iloc[1]) -def test_split_with_name(): +def test_split_with_name(any_string_dtype): # GH 12617 # should preserve name - s = Series(["a,b", "c,d"], name="xxx") + s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype) res = s.str.split(",") exp = Series([["a", "b"], ["c", "d"]], name="xxx") tm.assert_series_equal(res, exp) res = s.str.split(",", expand=True) - exp = DataFrame([["a", "b"], ["c", "d"]]) + exp = DataFrame([["a", "b"], ["c", "d"]], dtype=any_string_dtype) tm.assert_frame_equal(res, exp) idx = Index(["a,b", "c,d"], name="xxx")
the test (and benchmark) changes broken off from #41085 as a precursor to #41085 and #41372 (which currently makes changes to the str.split path, although I may break that PR up also)
https://api.github.com/repos/pandas-dev/pandas/pulls/41392
2021-05-09T10:13:04Z
2021-05-10T11:58:53Z
2021-05-10T11:58:53Z
2021-05-10T12:14:31Z
REF: add internal mechanics to separate index/columns sparsification in Styler
diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py index bd768f4f0a1d4..6917daaede2c6 100644 --- a/pandas/io/formats/style_render.py +++ b/pandas/io/formats/style_render.py @@ -132,11 +132,33 @@ def _compute(self): r = func(self)(*args, **kwargs) return r - def _translate(self): + def _translate( + self, sparsify_index: bool | None = None, sparsify_cols: bool | None = None + ): """ - Convert the DataFrame in `self.data` and the attrs from `_build_styles` - into a dictionary of {head, body, uuid, cellstyle}. + Process Styler data and settings into a dict for template rendering. + + Convert data and settings from ``Styler`` attributes such as ``self.data``, + ``self.tooltips`` including applying any methods in ``self._todo``. + + Parameters + ---------- + sparsify_index : bool, optional + Whether to sparsify the index or print all hierarchical index elements + sparsify_cols : bool, optional + Whether to sparsify the columns or print all hierarchical column elements + + Returns + ------- + d : dict + The following structure: {uuid, table_styles, caption, head, body, + cellstyle, table_attributes} """ + if sparsify_index is None: + sparsify_index = get_option("display.multi_sparse") + if sparsify_cols is None: + sparsify_cols = get_option("display.multi_sparse") + ROW_HEADING_CLASS = "row_heading" COL_HEADING_CLASS = "col_heading" INDEX_NAME_CLASS = "index_name" @@ -153,14 +175,14 @@ def _translate(self): } head = self._translate_header( - BLANK_CLASS, BLANK_VALUE, INDEX_NAME_CLASS, COL_HEADING_CLASS + BLANK_CLASS, BLANK_VALUE, INDEX_NAME_CLASS, COL_HEADING_CLASS, sparsify_cols ) d.update({"head": head}) self.cellstyle_map: DefaultDict[tuple[CSSPair, ...], list[str]] = defaultdict( list ) - body = self._translate_body(DATA_CLASS, ROW_HEADING_CLASS) + body = self._translate_body(DATA_CLASS, ROW_HEADING_CLASS, sparsify_index) d.update({"body": body}) cellstyle: list[dict[str, CSSList | list[str]]] = [ @@ -185,10 +207,17 @@ def 
_translate(self): return d def _translate_header( - self, blank_class, blank_value, index_name_class, col_heading_class + self, + blank_class: str, + blank_value: str, + index_name_class: str, + col_heading_class: str, + sparsify_cols: bool, ): """ - Build each <tr> within table <head>, using the structure: + Build each <tr> within table <head> as a list + + Using the structure: +----------------------------+---------------+---------------------------+ | index_blanks ... | column_name_0 | column_headers (level_0) | 1) | .. | .. | .. | @@ -196,9 +225,29 @@ def _translate_header( +----------------------------+---------------+---------------------------+ 2) | index_names (level_0 to level_n) ... | column_blanks ... | +----------------------------+---------------+---------------------------+ + + Parameters + ---------- + blank_class : str + CSS class added to elements within blank sections of the structure. + blank_value : str + HTML display value given to elements within blank sections of the structure. + index_name_class : str + CSS class added to elements within the index_names section of the structure. + col_heading_class : str + CSS class added to elements within the column_names section of structure. + sparsify_cols : bool + Whether column_headers section will add colspan attributes (>1) to elements. + + Returns + ------- + head : list + The associated HTML elements needed for template rendering. 
""" # for sparsifying a MultiIndex - col_lengths = _get_level_lengths(self.columns, self.hidden_columns) + col_lengths = _get_level_lengths( + self.columns, sparsify_cols, self.hidden_columns + ) clabels = self.data.columns.tolist() if self.data.columns.nlevels == 1: @@ -268,18 +317,36 @@ def _translate_header( return head - def _translate_body(self, data_class, row_heading_class): + def _translate_body( + self, data_class: str, row_heading_class: str, sparsify_index: bool + ): """ - Build each <tr> in table <body> in the following format: + Build each <tr> within table <body> as a list + + Use the following structure: +--------------------------------------------+---------------------------+ | index_header_0 ... index_header_n | data_by_column | +--------------------------------------------+---------------------------+ Also add elements to the cellstyle_map for more efficient grouped elements in <style></style> block + + Parameters + ---------- + data_class : str + CSS class added to elements within data_by_column sections of the structure. + row_heading_class : str + CSS class added to elements within the index_header section of structure. + sparsify_index : bool + Whether index_headers section will add rowspan attributes (>1) to elements. + + Returns + ------- + body : list + The associated HTML elements needed for template rendering. """ # for sparsifying a MultiIndex - idx_lengths = _get_level_lengths(self.index) + idx_lengths = _get_level_lengths(self.index, sparsify_index) rlabels = self.data.index.tolist() if self.data.index.nlevels == 1: @@ -520,14 +587,26 @@ def _element( } -def _get_level_lengths(index, hidden_elements=None): +def _get_level_lengths( + index: Index, sparsify: bool, hidden_elements: Sequence[int] | None = None +): """ Given an index, find the level length for each element. - Optional argument is a list of index positions which - should not be visible. 
+ Parameters + ---------- + index : Index + Index or columns to determine lengths of each element + sparsify : bool + Whether to hide or show each distinct element in a MultiIndex + hidden_elements : sequence of int + Index positions of elements hidden from display in the index affecting + length - Result is a dictionary of (level, initial_position): span + Returns + ------- + Dict : + Result is a dictionary of (level, initial_position): span """ if isinstance(index, MultiIndex): levels = index.format(sparsify=lib.no_default, adjoin=False) @@ -546,7 +625,7 @@ def _get_level_lengths(index, hidden_elements=None): for i, lvl in enumerate(levels): for j, row in enumerate(lvl): - if not get_option("display.multi_sparse"): + if not sparsify: lengths[(i, j)] = 1 elif (row is not lib.no_default) and (j not in hidden_elements): last_label = j diff --git a/pandas/tests/io/formats/style/test_style.py b/pandas/tests/io/formats/style/test_style.py index 855def916c2cd..31877b3f33482 100644 --- a/pandas/tests/io/formats/style/test_style.py +++ b/pandas/tests/io/formats/style/test_style.py @@ -844,7 +844,24 @@ def test_get_level_lengths(self): (1, 4): 1, (1, 5): 1, } - result = _get_level_lengths(index) + result = _get_level_lengths(index, sparsify=True) + tm.assert_dict_equal(result, expected) + + expected = { + (0, 0): 1, + (0, 1): 1, + (0, 2): 1, + (0, 3): 1, + (0, 4): 1, + (0, 5): 1, + (1, 0): 1, + (1, 1): 1, + (1, 2): 1, + (1, 3): 1, + (1, 4): 1, + (1, 5): 1, + } + result = _get_level_lengths(index, sparsify=False) tm.assert_dict_equal(result, expected) def test_get_level_lengths_un_sorted(self): @@ -858,7 +875,20 @@ def test_get_level_lengths_un_sorted(self): (1, 2): 1, (1, 3): 1, } - result = _get_level_lengths(index) + result = _get_level_lengths(index, sparsify=True) + tm.assert_dict_equal(result, expected) + + expected = { + (0, 0): 1, + (0, 1): 1, + (0, 2): 1, + (0, 3): 1, + (1, 0): 1, + (1, 1): 1, + (1, 2): 1, + (1, 3): 1, + } + result = _get_level_lengths(index, 
sparsify=False) tm.assert_dict_equal(result, expected) def test_mi_sparse(self):
This is not a user facing change. It adds only the necessary code to permit separate control of index/column MultIndex sparsification. Essentially it removes the `get_option("display.multi_sparse")` from a low level method to a higher level method where it can be overwritten by user-args later. partly addresses #41142
https://api.github.com/repos/pandas-dev/pandas/pulls/41391
2021-05-09T07:51:33Z
2021-05-12T15:03:20Z
2021-05-12T15:03:20Z
2021-05-12T16:09:17Z
REF: re-use machinery for DataFrameGroupBy.nunique
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 9287163053cac..76c53f2888a8f 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -22,7 +22,6 @@ Mapping, TypeVar, Union, - cast, ) import warnings @@ -1576,6 +1575,10 @@ def _wrap_aggregated_output( if self.axis == 1: result = result.T + if result.index.equals(self.obj.index): + # Retain e.g. DatetimeIndex/TimedeltaIndex freq + result.index = self.obj.index.copy() + # TODO: Do this more systematically return self._reindex_output(result) @@ -1627,21 +1630,21 @@ def _wrap_agged_manager(self, mgr: Manager2D) -> DataFrame: return self._reindex_output(result)._convert(datetime=True) - def _iterate_column_groupbys(self): - for i, colname in enumerate(self._selected_obj.columns): + def _iterate_column_groupbys(self, obj: FrameOrSeries): + for i, colname in enumerate(obj.columns): yield colname, SeriesGroupBy( - self._selected_obj.iloc[:, i], + obj.iloc[:, i], selection=colname, grouper=self.grouper, exclusions=self.exclusions, ) - def _apply_to_column_groupbys(self, func) -> DataFrame: + def _apply_to_column_groupbys(self, func, obj: FrameOrSeries) -> DataFrame: from pandas.core.reshape.concat import concat - columns = self._selected_obj.columns + columns = obj.columns results = [ - func(col_groupby) for _, col_groupby in self._iterate_column_groupbys() + func(col_groupby) for _, col_groupby in self._iterate_column_groupbys(obj) ] if not len(results): @@ -1728,41 +1731,21 @@ def nunique(self, dropna: bool = True) -> DataFrame: 4 ham 5 x 5 ham 5 y """ - from pandas.core.reshape.concat import concat - # TODO: this is duplicative of how GroupBy naturally works - # Try to consolidate with normal wrapping functions + if self.axis != 0: + # see test_groupby_crash_on_nunique + return self._python_agg_general(lambda sgb: sgb.nunique(dropna)) obj = self._obj_with_exclusions - if self.axis == 0: - iter_func = obj.items - else: - iter_func = obj.iterrows - - res_list = 
[ - SeriesGroupBy(content, selection=label, grouper=self.grouper).nunique( - dropna - ) - for label, content in iter_func() - ] - if res_list: - results = concat(res_list, axis=1) - results = cast(DataFrame, results) - else: - # concat would raise - results = DataFrame( - [], index=self.grouper.result_index, columns=obj.columns[:0] - ) - - if self.axis == 1: - results = results.T - - other_axis = 1 - self.axis - results._get_axis(other_axis).names = obj._get_axis(other_axis).names + results = self._apply_to_column_groupbys( + lambda sgb: sgb.nunique(dropna), obj=obj + ) + results.columns.names = obj.columns.names # TODO: do at higher level? if not self.as_index: results.index = ibase.default_index(len(results)) self._insert_inaxis_grouper_inplace(results) + return results @Appender(DataFrame.idxmax.__doc__) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 1105c1bd1d782..d6b0e118cc7ce 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1904,7 +1904,9 @@ def ohlc(self) -> DataFrame: ) return self._reindex_output(result) - return self._apply_to_column_groupbys(lambda x: x.ohlc()) + return self._apply_to_column_groupbys( + lambda x: x.ohlc(), self._obj_with_exclusions + ) @final @doc(DataFrame.describe) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index f716a3a44cd54..67d2af46ac8ee 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2060,24 +2060,36 @@ def test_dup_labels_output_shape(groupby_func, idx): def test_groupby_crash_on_nunique(axis): # Fix following 30253 + dti = date_range("2016-01-01", periods=2, name="foo") df = DataFrame({("A", "B"): [1, 2], ("A", "C"): [1, 3], ("D", "B"): [0, 0]}) + df.columns.names = ("bar", "baz") + df.index = dti axis_number = df._get_axis_number(axis) if not axis_number: df = df.T - result = df.groupby(axis=axis_number, level=0).nunique() + gb = df.groupby(axis=axis_number, 
level=0) + result = gb.nunique() - expected = DataFrame({"A": [1, 2], "D": [1, 1]}) + expected = DataFrame({"A": [1, 2], "D": [1, 1]}, index=dti) + expected.columns.name = "bar" if not axis_number: expected = expected.T tm.assert_frame_equal(result, expected) - # same thing, but empty columns - gb = df[[]].groupby(axis=axis_number, level=0) - res = gb.nunique() - exp = expected[[]] + if axis_number == 0: + # same thing, but empty columns + gb2 = df[[]].groupby(axis=axis_number, level=0) + exp = expected[[]] + else: + # same thing, but empty rows + gb2 = df.loc[[]].groupby(axis=axis_number, level=0) + # default for empty when we can't infer a dtype is float64 + exp = expected.loc[[]].astype(np.float64) + + res = gb2.nunique() tm.assert_frame_equal(res, exp) diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py index 5dc64a33098f3..7cc2b7f72fb69 100644 --- a/pandas/tests/resample/test_time_grouper.py +++ b/pandas/tests/resample/test_time_grouper.py @@ -121,12 +121,8 @@ def test_aaa_group_order(): tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 5)), df[4::5]) -def test_aggregate_normal(request, resample_method): +def test_aggregate_normal(resample_method): """Check TimeGrouper's aggregation is identical as normal groupby.""" - if resample_method == "ohlc": - request.node.add_marker( - pytest.mark.xfail(reason="DataError: No numeric types to aggregate") - ) data = np.random.randn(20, 4) normal_df = DataFrame(data, columns=["A", "B", "C", "D"])
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41390
2021-05-09T03:28:14Z
2021-05-10T23:26:13Z
2021-05-10T23:26:13Z
2021-05-11T00:12:57Z
TST: Add regression tests for old issues
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 9e1d13eac5039..9ca7d0b465250 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -1407,3 +1407,18 @@ def test_integer_array_add_list_like( assert_function(left, expected) assert_function(right, expected) + + +def test_sub_multiindex_swapped_levels(): + # GH 9952 + df = pd.DataFrame( + {"a": np.random.randn(6)}, + index=pd.MultiIndex.from_product( + [["a", "b"], [0, 1, 2]], names=["levA", "levB"] + ), + ) + df2 = df.copy() + df2.index = df2.index.swaplevel(0, 1) + result = df - df2 + expected = pd.DataFrame([0.0] * 6, columns=["a"], index=df.index) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py index 7ffe2fb9ab1ff..7244624e563e3 100644 --- a/pandas/tests/frame/indexing/test_where.py +++ b/pandas/tests/frame/indexing/test_where.py @@ -729,3 +729,19 @@ def test_where_string_dtype(frame_or_series): dtype=StringDtype(), ) tm.assert_equal(result, expected) + + +def test_where_bool_comparison(): + # GH 10336 + df_mask = DataFrame( + {"AAA": [True] * 4, "BBB": [False] * 4, "CCC": [True, False, True, False]} + ) + result = df_mask.where(df_mask == False) # noqa:E712 + expected = DataFrame( + { + "AAA": np.array([np.nan] * 4, dtype=object), + "BBB": [False] * 4, + "CCC": [np.nan, False, np.nan, False], + } + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py index 924059f634fca..5a87803ddc21e 100644 --- a/pandas/tests/frame/methods/test_reset_index.py +++ b/pandas/tests/frame/methods/test_reset_index.py @@ -657,3 +657,17 @@ def test_reset_index_empty_frame_with_datetime64_multiindex_from_groupby(): expected["c3"] = expected["c3"].astype("datetime64[ns]") expected["c1"] = expected["c1"].astype("float64") tm.assert_frame_equal(result, 
expected) + + +def test_reset_index_multiindex_nat(): + # GH 11479 + idx = range(3) + tstamp = date_range("2015-07-01", freq="D", periods=3) + df = DataFrame({"id": idx, "tstamp": tstamp, "a": list("abc")}) + df.loc[2, "tstamp"] = pd.NaT + result = df.set_index(["id", "tstamp"]).reset_index("id") + expected = DataFrame( + {"id": range(3), "a": list("abc")}, + index=pd.DatetimeIndex(["2015-07-01", "2015-07-02", "NaT"], name="tstamp"), + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index ab240531a7505..03376bdce26f8 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -2398,6 +2398,12 @@ def check_views(): # assert b[0] == 0 assert df.iloc[0, 2] == 0 + def test_from_series_with_name_with_columns(self): + # GH 7893 + result = DataFrame(Series(1, name="foo"), columns=["bar"]) + expected = DataFrame(columns=["bar"]) + tm.assert_frame_equal(result, expected) + class TestDataFrameConstructorWithDatetimeTZ: @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py index cc4042822bc8b..365d8abcb6bac 100644 --- a/pandas/tests/frame/test_stack_unstack.py +++ b/pandas/tests/frame/test_stack_unstack.py @@ -1998,3 +1998,23 @@ def test_stack_nan_in_multiindex_columns(self): columns=Index([(0, None), (0, 2), (0, 3)]), ) tm.assert_frame_equal(result, expected) + + def test_stack_nan_level(self): + # GH 9406 + df_nan = DataFrame( + np.arange(4).reshape(2, 2), + columns=MultiIndex.from_tuples( + [("A", np.nan), ("B", "b")], names=["Upper", "Lower"] + ), + index=Index([0, 1], name="Num"), + dtype=np.float64, + ) + result = df_nan.stack() + expected = DataFrame( + [[0.0, np.nan], [np.nan, 1], [2.0, np.nan], [np.nan, 3.0]], + columns=Index(["A", "B"], name="Upper"), + index=MultiIndex.from_tuples( + [(0, np.nan), (0, "b"), (1, np.nan), (1, 
"b")], names=["Num", "Lower"] + ), + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py index 96d2c246dd0ee..c87efdbd35fa4 100644 --- a/pandas/tests/indexing/multiindex/test_loc.py +++ b/pandas/tests/indexing/multiindex/test_loc.py @@ -776,3 +776,15 @@ def test_loc_getitem_drops_levels_for_one_row_dataframe(): result = ser.loc["x", :, "z"] expected = Series([0], index=Index(["y"], name="b")) tm.assert_series_equal(result, expected) + + +def test_mi_columns_loc_list_label_order(): + # GH 10710 + cols = MultiIndex.from_product([["A", "B", "C"], [1, 2]]) + df = DataFrame(np.zeros((5, 6)), columns=cols) + result = df.loc[:, ["B", "A"]] + expected = DataFrame( + np.zeros((5, 4)), + columns=MultiIndex.from_tuples([("B", 1), ("B", 2), ("A", 1), ("A", 2)]), + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/reshape/concat/test_datetimes.py b/pandas/tests/reshape/concat/test_datetimes.py index 2b8233388d328..0a1ba17949ddb 100644 --- a/pandas/tests/reshape/concat/test_datetimes.py +++ b/pandas/tests/reshape/concat/test_datetimes.py @@ -470,6 +470,14 @@ def test_concat_tz_NaT(self, t1): tm.assert_frame_equal(result, expected) + def test_concat_tz_with_empty(self): + # GH 9188 + result = concat( + [DataFrame(date_range("2000", periods=1, tz="UTC")), DataFrame()] + ) + expected = DataFrame(date_range("2000", periods=1, tz="UTC")) + tm.assert_frame_equal(result, expected) + class TestPeriodConcat: def test_concat_period_series(self): diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 0372bf740640c..675120e03d821 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -201,6 +201,14 @@ def test_setitem_slicestep(self): series[::2] = 0 assert (series[::2] == 0).all() + def test_setitem_multiindex_slice(self, indexer_sli): + # GH 8856 + mi = 
MultiIndex.from_product(([0, 1], list("abcde"))) + result = Series(np.arange(10, dtype=np.int64), mi) + indexer_sli(result)[::4] = 100 + expected = Series([100, 1, 2, 3, 100, 5, 6, 7, 100, 9], mi) + tm.assert_series_equal(result, expected) + class TestSetitemBooleanMask: def test_setitem_boolean(self, string_series):
- [x] closes #7893 - [x] closes #8856 - [x] closes #9188 - [x] closes #9406 - [x] closes #9952 - [x] closes #10336 - [x] closes #10710 - [x] closes #11479 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/41389
2021-05-09T00:57:27Z
2021-05-10T21:45:33Z
2021-05-10T21:45:30Z
2021-05-11T00:03:19Z
TST: fix groupby xfails
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index f716a3a44cd54..39121e92dcd83 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1738,6 +1738,7 @@ def test_pivot_table_values_key_error(): ) def test_empty_groupby(columns, keys, values, method, op, request): # GH8093 & GH26411 + override_dtype = None if isinstance(values, Categorical) and len(keys) == 1 and method == "apply": mark = pytest.mark.xfail(raises=TypeError, match="'str' object is not callable") @@ -1784,12 +1785,9 @@ def test_empty_groupby(columns, keys, values, method, op, request): and op in ["sum", "prod"] and method != "apply" ): - mark = pytest.mark.xfail( - raises=AssertionError, match="(DataFrame|Series) are different" - ) - request.node.add_marker(mark) + # We expect to get Int64 back for these + override_dtype = "Int64" - override_dtype = None if isinstance(values[0], bool) and op in ("prod", "sum") and method != "apply": # sum/product of bools is an integer override_dtype = "int64" diff --git a/pandas/tests/groupby/test_rank.py b/pandas/tests/groupby/test_rank.py index 20edf03c5b96c..6a73d540c7088 100644 --- a/pandas/tests/groupby/test_rank.py +++ b/pandas/tests/groupby/test_rank.py @@ -11,7 +11,6 @@ concat, ) import pandas._testing as tm -from pandas.core.base import DataError def test_rank_apply(): @@ -462,7 +461,6 @@ def test_rank_avg_even_vals(dtype, upper): tm.assert_frame_equal(result, exp_df) -@pytest.mark.xfail(reason="Works now, needs tests") @pytest.mark.parametrize("ties_method", ["average", "min", "max", "first", "dense"]) @pytest.mark.parametrize("ascending", [True, False]) @pytest.mark.parametrize("na_option", ["keep", "top", "bottom"]) @@ -470,13 +468,25 @@ def test_rank_avg_even_vals(dtype, upper): @pytest.mark.parametrize( "vals", [["bar", "bar", "foo", "bar", "baz"], ["bar", np.nan, "foo", np.nan, "baz"]] ) -def test_rank_object_raises(ties_method, ascending, na_option, pct, vals): +def 
test_rank_object_dtype(ties_method, ascending, na_option, pct, vals): df = DataFrame({"key": ["foo"] * 5, "val": vals}) + mask = df["val"].isna() - with pytest.raises(DataError, match="No numeric types to aggregate"): - df.groupby("key").rank( - method=ties_method, ascending=ascending, na_option=na_option, pct=pct - ) + gb = df.groupby("key") + res = gb.rank(method=ties_method, ascending=ascending, na_option=na_option, pct=pct) + + # construct our expected by using numeric values with the same ordering + if mask.any(): + df2 = DataFrame({"key": ["foo"] * 5, "val": [0, np.nan, 2, np.nan, 1]}) + else: + df2 = DataFrame({"key": ["foo"] * 5, "val": [0, 0, 2, 0, 1]}) + + gb2 = df2.groupby("key") + alt = gb2.rank( + method=ties_method, ascending=ascending, na_option=na_option, pct=pct + ) + + tm.assert_frame_equal(res, alt) @pytest.mark.parametrize("na_option", [True, "bad", 1])
https://api.github.com/repos/pandas-dev/pandas/pulls/41387
2021-05-08T22:35:25Z
2021-05-10T20:11:14Z
2021-05-10T20:11:14Z
2021-05-10T21:23:00Z
ENH: IO support for R data files with C extension
diff --git a/LICENSES/LIBRDATA_LICENSE b/LICENSES/LIBRDATA_LICENSE new file mode 100644 index 0000000000000..4f24e6b9127ff --- /dev/null +++ b/LICENSES/LIBRDATA_LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2013-2020 Evan Miller (except where otherwise noted) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index c2b030d732ba9..edcf852749deb 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -31,6 +31,7 @@ The pandas I/O API is a set of top level ``reader`` functions accessed like binary;`Feather Format <https://github.com/wesm/feather>`__;:ref:`read_feather<io.feather>`;:ref:`to_feather<io.feather>` binary;`Parquet Format <https://parquet.apache.org/>`__;:ref:`read_parquet<io.parquet>`;:ref:`to_parquet<io.parquet>` binary;`ORC Format <https://orc.apache.org/>`__;:ref:`read_orc<io.orc>`; + binary;`R <https://www.r-project.org/>`__;:ref:`read_rdata<io.rdata_reader>`;:ref:`to_rdata<io.rdata_writer>` binary;`Stata <https://en.wikipedia.org/wiki/Stata>`__;:ref:`read_stata<io.stata_reader>`;:ref:`to_stata<io.stata_writer>` binary;`SAS <https://en.wikipedia.org/wiki/SAS_(software)>`__;:ref:`read_sas<io.sas_reader>`; binary;`SPSS <https://en.wikipedia.org/wiki/SPSS>`__;:ref:`read_spss<io.spss_reader>`; @@ -5927,6 +5928,407 @@ respective functions from ``pandas-gbq``. Full documentation can be found `here <https://pandas-gbq.readthedocs.io/>`__. + +.. _io.rdata: + +R data format +------------- + +.. _io.rdata_reader: + +Reading R data +'''''''''''''' + +.. versionadded:: 1.3.0 + +The top-level function ``read_rdata`` will read the native serialization types +in the R language and environment. For .RData and its synonymous shorthand, .rda, +that can hold multiple R objects, method will return a ``dict`` of ``DataFrames``. +For .rds types that only contains a single R object, method will return a ``dict`` +of a single ``DataFrame``. + +.. note:: + + Since any R object can be saved in these types, this method will only return + data.frame objects or objects coercible to data.frames including matrices, + tibbles, and data.tables. + +For more information of R serialization data types, see docs on `rds`_ +and `rda`_ data formats. + +.. 
_rds: https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/readRDS + +.. _rda: https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/save + +To walk through an example, consider the following generated data.frames in R +using natural environment data samples from US EPA, UK BGCI, and NOAA pubilc data. +As shown below, each data.frame is saved individually in .rds types and all together +in an .rda type. + +.. code-block:: r + + ghg_df <- data.frame( + gas = c("Carbon dioxide", "Methane", "Nitrous oxide", + "Fluorinated gases", "Total"), + year = c(2018, 2018, 2018, 2018, 2018), + emissions = c(5424.88150213288, 634.457127078267, 434.528555376666, + 182.782432461777, 6676.64961704959), + row.names = c(141:145), + stringsAsFactors = FALSE + ) + ghg_df + gas year emissions + 141 Carbon dioxide 2018 5424.8815 + 142 Methane 2018 634.4571 + 143 Nitrous oxide 2018 434.5286 + 144 Fluorinated gases 2018 182.7824 + 145 Total 2018 6676.6496 + + # SAVE SINGLE OBJECT + saveRDS(ghg_df, file="ghg_df.rds") + + plants_df <- data.frame( + plant_group = c("Pteridophytes", "Pteridophytes", "Pteridophytes", + "Pteridophytes", "Pteridophytes"), + status = c("Data Deficient", "Extinct", "Not Threatened", + "Possibly Threatened", "Threatened"), + count = c(398, 65, 1294, 408, 1275), + row.names = c(16:20), + stringsAsFactors = FALSE + ) + plants_df + plant_group status count + 16 Pteridophytes Data Deficient 398 + 17 Pteridophytes Extinct 65 + 18 Pteridophytes Not Threatened 1294 + 19 Pteridophytes Possibly Threatened 408 + 20 Pteridophytes Threatened 1275 + + # SAVE SINGLE OBJECT + saveRDS(plants_df, file="plants_df.rds") + + sea_ice_df <- data.frame( + year = c(2016, 2017, 2018, 2019, 2020), + mo = c(12, 12, 12, 12, 12), + data.type = c("Goddard", "Goddard", "Goddard", "Goddard", "NRTSI-G"), + region = c("S", "S", "S", "S", "S"), + extent = c(8.28, 9.48, 9.19, 9.41, 10.44), + area = c(5.51, 6.23, 5.59, 6.59, 6.5), + row.names = c(1012:1016), + stringsAsFactors 
= FALSE + ) + sea_ice_df + year mo data.type region extent area + 1012 2016 12 Goddard S 8.28 5.51 + 1013 2017 12 Goddard S 9.48 6.23 + 1014 2018 12 Goddard S 9.19 5.59 + 1015 2019 12 Goddard S 9.41 6.59 + 1016 2020 12 NRTSI-G S 10.44 6.50 + + # SAVE SINGLE OBJECT + saveRDS(sea_ice_df, file="sea_ice_df.rds") + + # SAVE MULTIPLE OBJECTS + save(ghg_df, plants_df, sea_ice_df, file="env_data_dfs.rda") + +With ``read_rdata``, you can read these above .rds and .rda files, both +generating a dictionary of DataFrame(s): + +.. ipython:: python + :suppress: + + rel_path = os.path.join("..", "pandas", "tests", "io", "data", "rdata") + file_path = os.path.abspath(rel_path) + +.. ipython:: python + + rds_file = os.path.join(file_path, "ghg_df.rds") + env_df = pd.read_rdata(rds_file) + {k: df.tail() for k, df in env_df.items()} + + rda_file = os.path.join(file_path, "env_data_dfs.rda") + env_dfs = pd.read_rdata(rda_file) + {k: df.tail() for k, df in env_dfs.items()} + +To ignore the rownames of data.frame, use option ``rownames=False``: + +.. ipython:: python + + rds_file = os.path.join(file_path, "plants_df.rds") + plants_df = pd.read_rdata(rds_file, rownames=False)["r_dataframe"].tail() + plants_df + + +To select specific objects in .rda, pass a list of names into ``select_frames``. +By default, all objects are returned. + +.. ipython:: python + + rda_file = os.path.join(file_path, "env_data_dfs.rda") + sub_env_dfs = pd.read_rdata(rda_file, select_frames=["sea_ice_df"]) + sub_env_dfs + +To read from a file-like object, read object in argument, ``path_or_buffer``: + +.. ipython:: python + + rds_file = os.path.join(file_path, "plants_df.rds") + with open(rds_file, "rb") as f: + plants_df = pd.read_rdata( + f, + file_format="rds", + )["r_dataframe"] + + plants_df + +To read from URL, pass link directly into method: + +.. 
ipython:: python + + url = ("https://github.com/hadley/nycflights13/" + "blob/master/data/airlines.rda?raw=true") + + airlines = pd.read_rdata(url, file_format="rda") + airlines + +To read from an Amazon S3 bucket, point to the storage path: + +.. ipython:: python + + ghcran = pd.read_rdata("s3://public-r-data/ghcran.Rdata", compression=None) + ghcran + +Also, remember if R data files do not contain any data frame object, a parsing error +will occur: + +.. code-block:: ipython + + In [610]: rds_file = pd.read_rdata("env_data_non_dfs.rda") + ... + LibrdataReaderError: Invalid file, or file has unsupported features + +Finally, please note R's ``Date`` (without time component) will translate to +``datetime64`` in pandas. Also, R's date/time field type, ``POSIXct``, that can +carry timezones will translate to UTC time in pandas. For example, in R, +the following data sample from an .rda shows date/time in 'America/Chicago' local +timezone: + +.. code-block:: r + + load("ppm_df.rda") + tail(ppm_df, 5) + date decimal_date monthly_average deseasonalized num_days std_dev_of_days unc_of_mon_mean + 612 2020-12-16 17:42:25 2020.958 414.25 414.98 30 0.47 0.17 + 613 2021-01-16 05:17:31 2021.042 415.52 415.26 29 0.44 0.16 + 614 2021-02-15 15:00:00 2021.125 416.75 415.93 28 1.02 0.37 + 615 2021-03-18 01:42:28 2021.208 417.64 416.18 28 0.86 0.31 + 616 2021-04-17 12:17:31 2021.292 419.05 416.23 24 1.12 0.44 + +In pandas, conversion shows adjustment in hours to UTC: + +.. ipython:: python + + r_dfs = pd.read_rdata(os.path.join(file_path, "ppm_df.rda")) + r_dfs["ppm_df"].tail() + +Below is summary of how ``read_rdata`` handles data types between R and pandas. + +.. 
list-table:: + :widths: 25 25 25 + :header-rows: 1 + + * - R types + - Conversion notes + - pandas types + * - logical + - + - bool + * - integer + - + - int64 + * - numeric + - + - float64 + * - Date + - + - datetime64[ns] + * - POSIXct + - UTC conversion + - datetime64[ns] + * - factor + - + - Categorical + * - character + - + - object + +.. _io.rdata_writer: + +Writing R data +'''''''''''''' + +.. versionadded:: 1.3.0 + +The method :func:`~pandas.core.frame.DataFrame.to_rdata` will write a DataFrame +into R data files (.RData, .rda, and .rds). + +For a single DataFrame in rds type, pass in a file or buffer in method: + +.. ipython:: python + + env_dfs["plants_df"].to_rdata("plants_df.rds") + +For a single DataFrame in RData or rda types, pass in a file or buffer in method +and optionally give it a name: + +.. ipython:: python + + env_dfs["ghg_df"].to_rdata("ghg_df.rda", rda_name="ghg_df") + +.. note:: + + While RData and rda types can hold multiple R objects, this method currently + only supports writing out a single DataFrame. + +Even write to a buffer and read its content (and be sure to adjust default +``gzip`` compression to ``compression=None``): + +.. ipython:: python + + with BytesIO() as b_io: + env_dfs["sea_ice_df"].to_rdata( + b_io, + file_format="rda", + index=False, + compression=None, + ) + print( + pd.read_rdata( + b_io.getvalue(), + file_format="rda", + rownames=False, + compression=None, + )["pandas_dataframe"].tail() + ) + +While DataFrame index will not map into R rownames, by default ``index=True`` +will output as a named column or multiple columns for MultiIndex. + +.. ipython:: python + + env_dfs["ghg_df"].rename_axis(None).to_rdata("ghg_df.rds") + + pd.read_rdata("ghg_df.rds")["r_dataframe"].tail() + +To ignore the index, use ``index=False``: + +.. 
ipython:: python + + env_dfs["ghg_df"].rename_axis(None).to_rdata("ghg_df.rds", index=False) + + pd.read_rdata("ghg_df.rds")["r_dataframe"].tail() + +By default, these R serialized types are compressed files in either gzip, bzip2, +or xz algorithms. Similar to R, the default ``compression`` type in this method +is "gzip" or "gz". Notice size difference of compressed and uncompressed files: + +.. ipython:: python + + env_dfs["plants_df"].to_rdata("plants_df_gz.rds") + env_dfs["plants_df"].to_rdata("plants_df_bz2.rds", compression="bz2") + env_dfs["plants_df"].to_rdata("plants_df_xz.rds", compression="xz") + env_dfs["plants_df"].to_rdata("plants_df_non_comp.rds", compression=None) + + os.stat("plants_df_gz.rds").st_size + os.stat("plants_df_bz2.rds").st_size + os.stat("plants_df_xz.rds").st_size + os.stat("plants_df_non_comp.rds").st_size + +Like other IO methods, ``storage_options`` are enabled to write to those platforms: + +.. code-block:: ipython + + env_dfs["ghg_df"].to_rdata( + "s3://path/to/my/storage/pandas_df.rda", + storage_options={"user": "xxx", "password": "???"} + ) + +.. ipython:: python + :suppress: + + os.remove("ghg_df.rds") + os.remove("ghg_df.rda") + os.remove("plants_df.rds") + os.remove("plants_df_gz.rds") + os.remove("plants_df_bz2.rds") + os.remove("plants_df_xz.rds") + os.remove("plants_df_non_comp.rds") + +Once exported, the single DataFrame can be read or loaded in R: + +.. code-block:: r + + plants_df <- readRDS("plants_df.rds") + plants_df + plant_group status count + 16 Pteridophytes Data Deficient 398 + 17 Pteridophytes Extinct 65 + 18 Pteridophytes Not Threatened 1294 + 19 Pteridophytes Possibly Threatened 408 + 20 Pteridophytes Threatened 1275 + + load("ghg_df.rda") + + mget(list=ls()) + $ghg_df + gas year emissions + 141 Carbon dioxide 2018 5424.8815 + 142 Methane 2018 634.4571 + 143 Nitrous oxide 2018 434.5286 + 144 Fluorinated gases 2018 182.7824 + 145 Total 2018 6676.6496 + +.. note:: + + R does not support all dtypes of pandas. 
For special dtypes, you may + have to prepare or clean data in either end (R or pandas side) to + meet your specific data needs. + +Below is summary of how ``to_rdata`` handles data types between pandas +and R in order to translate pandas simpler dtypes to R's atomic types. + +.. list-table:: + :widths: 25 25 25 + :header-rows: 1 + + * - pandas types + - Conversion notes + - R types + * - bool + - + - logical + * - any uint or int + - + - integer + * - any float + - + - numeric + * - datetime64[ns] + - + - POSIXct + * - datetime64[ns, tz] + - remove tz awareness + - POSIXct + * - timedelta + - convert to seconds + - numeric + * - object + - + - character + * - all other dtypes + - convert to string + - character + .. _io.stata: Stata format @@ -5982,6 +6384,7 @@ outside of this range, the variable is cast to ``int16``. 115 dta file format. Attempting to write *Stata* dta files with strings longer than 244 characters raises a ``ValueError``. + .. _io.stata_reader: Reading from Stata format diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index b92e414f2055e..0e7f74376125f 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -112,6 +112,109 @@ both XPath 1.0 and XSLT 1.0 are available. (:issue:`27554`) For more, see :ref:`io.xml` in the user guide on IO tools. +.. _whatsnew_130.read_to_rdata: + +Read and write R data files +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +We added I/O support to read and write R data files (.RData, .rda, .rds) using +:func:`pandas.read_rdata` and :meth:`DataFrame.to_rdata`. Both methods rely on +the `librdata`_ C library to support open source data migration between R and +Python pandas. (:issue:`40287`) + +.. _librdata: https://github.com/WizardMac/librdata + +For example, consider the below generated data frame and matrix in R: + +.. 
code-block:: r + + In [1]: carbon_ppm_df <- data.frame( + ...: year = c(2020, 2020, 2020, 2021, 2021), + ...: month = c(10, 11, 12, 1, 2), + ...: monthly_average = c(411.51, 413.11, 414.25, 415.52, 416.75), + ...: num_days = c(30, 27, 30, 29, 28), + ...: st_dev_of_days = c(0.22, 0.8, 0.48, 0.44, 1.01), + ...: unc_mon_mean = c(0.08, 0.29, 0.17, 0.16, 0.36) + ...: ) + + In [2]: iucn_species_mtx <- matrix( + ...: c(102, 79, 159, 63, 30, 13, 267, 35, 85, + ...: 30, 10, 5, 1, 2, 7, 14, 2, 2, + ...: 409, 121, 22, 75, 40, 78, 134, 146, 28, + ...: 29, 6, 0, 0, 0, 12, 2, 1, 0, + ...: 3770, 627, 223, 365, 332, 699, 604, 663, 225, + ...: 6972, 989, 460, 730, 588, 1302, 518, 1060, 542, + ...: 7089, 1219, 798, 831, 538, 1051, 975, 719, 556, + ...: 2990, 4251, 52, 2819, 1220, 914, 1648, 1184, 845, + ...: 43885, 20685, 11158, 10865, 8492, 8192, 7326, 7212, 5940 + ...: ), + ...: ncol=9, nrow=9, + ...: dimnames = list( + ...: c("MAGNOLIOPSIDA", "ACTINOPTERYGII", "AVES", + ...: "INSECTA", "REPTILIA", "LILIOPSIDA", + ...: "GASTROPODA", "AMPHIBIA", "MAMMALIA"), + ...: c("EX", "EW", "CR(PE)", "CR(PEW)", "CR", + ...: "EN", "VU", "DD", "Total") + ...: ) + ...: ) + + In [3]: saveRDS(carbon_ppm_df, "ppm_df_r.rds") + In [4]: save(carbon_ppm_df, iucn_species_mtx, file = "env_objs_r.rda") + +Now, both R data files can be read in pandas to return either DataFrame +for .rds types or ``dict`` of DataFrames for .RData and .rda types: + +..
code-block:: ipython + + In [1]: ppm_df = pd.read_rdata("ppm_df_r.rds")["r_dataframe"] + In [2]: ppm_df + Out[2]: + year month monthly_average deseasonalized num_days std_dev_of_days unc_of_mon_mean + rownames + 1 2020 12 414.25 414.98 30 0.47 0.17 + 2 2021 1 415.52 415.26 29 0.44 0.16 + 3 2021 2 416.75 415.93 28 1.02 0.37 + 4 2021 3 417.64 416.18 28 0.86 0.31 + 5 2021 4 419.05 416.23 24 1.12 0.44 + + In [4]: env_objs = pd.read_rdata("env_objs_r.rda") + In [5]: env_objs + Out[5]: + {'carbon_ppm_df': + year month monthly_average deseasonalized num_days std_dev_of_days unc_of_mon_mean + rownames + 1 2020 12 414.25 414.98 30 0.47 0.17 + 2 2021 1 415.52 415.26 29 0.44 0.16 + 3 2021 2 416.75 415.93 28 1.02 0.37 + 4 2021 3 417.64 416.18 28 0.86 0.31 + 5 2021 4 419.05 416.23 24 1.12 0.44 + + [5 rows x 7 columns], + 'iucn_species_mtx': + EX EW CR(PE) CR(PEW) CR EN VU DD Total + rownames + MAGNOLIOPSIDA 102 30 409 29 3770 6972 7089 2990 43885 + ACTINOPTERYGII 79 10 121 6 627 989 1219 4251 20685 + AVES 159 5 22 0 223 460 798 52 11158 + INSECTA 63 1 75 0 365 730 831 2819 10865 + REPTILIA 30 2 40 0 332 588 538 1220 8492 + LILIOPSIDA 13 7 78 12 699 1302 1051 914 8192 + GASTROPODA 267 14 134 2 604 518 975 1648 7326 + AMPHIBIA 35 2 146 1 663 1060 719 1184 7212 + + [8 rows x 9 columns]} + +Additionally, pandas data can be written back out into the same R data files: + +.. code-block:: ipython + + In [5]: ppm_df.to_rdata("ppm_df_py.rds") + In [6]: env_objs['iucn_species_mtx'].to_rdata( + ...: "iucn_species_py.rda", + ...: rda_name="iucn_species_df" + ...: ) + +For more, see :ref:`io.rdata` in the user guide on IO tools. + .. _whatsnew_130.enhancements.styler: Styler enhancements @@ -234,7 +337,6 @@ For example: df df.rolling("2D", center=True).mean() - ..
_whatsnew_130.enhancements.other: Other enhancements diff --git a/pandas/__init__.py b/pandas/__init__.py index db4043686bcbb..498696938d079 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -171,6 +171,7 @@ read_stata, read_sas, read_spss, + read_rdata, ) from pandas.io.json import _json_normalize as json_normalize diff --git a/pandas/_libs/src/librdata/CKHashTable.c b/pandas/_libs/src/librdata/CKHashTable.c new file mode 100644 index 0000000000000..c0312e3f5dc74 --- /dev/null +++ b/pandas/_libs/src/librdata/CKHashTable.c @@ -0,0 +1,350 @@ +// CKHashTable - A simple hash table +// Copyright 2010-2020 Evan Miller (see LICENSE) + +#include "CKHashTable.h" + +/* + SipHash reference C implementation + + Copyright (c) 2012 Jean-Philippe Aumasson <jeanphilippe.aumasson@gmail.com> + Copyright (c) 2012 Daniel J. Bernstein <djb@cr.yp.to> + + To the extent possible under law, the author(s) have dedicated all copyright + and related and neighboring rights to this software to the public domain + worldwide. This software is distributed without any warranty. + + You should have received a copy of the CC0 Public Domain Dedication along with + this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. 
+ */ +#include <stdio.h> +#include <string.h> +#include <stdlib.h> +typedef uint64_t u64; +typedef uint32_t u32; +typedef uint8_t u8; + + +#define ROTL(x, b) (u64)( ((x) << (b)) | ( (x) >> (64 - (b))) ) + +#define U32TO8_LE(p, v) \ +(p)[0] = (u8)((v) ); (p)[1] = (u8)((v) >> 8); \ +(p)[2] = (u8)((v) >> 16); (p)[3] = (u8)((v) >> 24); + +#define U64TO8_LE(p, v) \ +U32TO8_LE((p), (u32)((v) )); \ +U32TO8_LE((p) + 4, (u32)((v) >> 32)); + +#define U8TO64_LE(p) \ +(((u64)((p)[0])) | \ +((u64)((p)[1]) << 8) | \ +((u64)((p)[2]) << 16) | \ +((u64)((p)[3]) << 24) | \ +((u64)((p)[4]) << 32) | \ +((u64)((p)[5]) << 40) | \ +((u64)((p)[6]) << 48) | \ +((u64)((p)[7]) << 56)) + +#define SIPROUND \ +do { \ +v0 += v1; v1=ROTL(v1, 13); v1 ^= v0; v0=ROTL(v0, 32); \ +v2 += v3; v3=ROTL(v3, 16); v3 ^= v2; \ +v0 += v3; v3=ROTL(v3, 21); v3 ^= v0; \ +v2 += v1; v1=ROTL(v1, 17); v1 ^= v2; v2=ROTL(v2, 32); \ +} while (0) + +/* SipHash-1-2 */ +static int siphash( + unsigned char *out, + const unsigned char *in, + unsigned long long inlen, + const unsigned char *k) { + /* "somepseudorandomlygeneratedbytes" */ + u64 v0 = 0x736f6d6570736575ULL; + u64 v1 = 0x646f72616e646f6dULL; + u64 v2 = 0x6c7967656e657261ULL; + u64 v3 = 0x7465646279746573ULL; + u64 b; + u64 k0 = U8TO64_LE(k); + u64 k1 = U8TO64_LE(k + 8); + u64 m; + const u8 *end = in + inlen - ( inlen % sizeof( u64 ) ); + const int left = inlen & 7; + b = ((u64)inlen) << 56; + v3 ^= k1; + v2 ^= k0; + v1 ^= k1; + v0 ^= k0; + + for ( ; in != end; in += 8 ) { + m = U8TO64_LE(in); + + v3 ^= m; + + SIPROUND; + + v0 ^= m; + } + + switch ( left ) { + case 7: b |= ((u64)in[ 6]) << 48; + + case 6: b |= ((u64)in[ 5]) << 40; + + case 5: b |= ((u64)in[ 4]) << 32; + + case 4: b |= ((u64)in[ 3]) << 24; + + case 3: b |= ((u64)in[ 2]) << 16; + + case 2: b |= ((u64)in[ 1]) << 8; + + case 1: b |= ((u64)in[ 0]); break; + + case 0: break; + } + + v3 ^= b; + + SIPROUND; + + v0 ^= b; + v2 ^= 0xff; + + SIPROUND; + SIPROUND; + + b = v0 ^ v1 ^ v2 ^ v3; + U64TO8_LE(out, 
b); + return 0; +} + +inline uint64_t ck_hash_str(const char *str, size_t keylen) { + uint64_t hash; + unsigned char k[16] = { 0 }; + siphash((unsigned char *)&hash, (const unsigned char *)str, keylen, k); + return hash; +} + +const void *ck_float_hash_lookup(float key, ck_hash_table_t *table) { + return ck_str_n_hash_lookup((const char *)&key, sizeof(float), table); +} + +int ck_float_hash_insert( + float key, + const void *value, + ck_hash_table_t *table +) { + return ck_str_n_hash_insert( + (const char *)&key, + sizeof(float), + value, + table); +} + +const void *ck_double_hash_lookup(double key, ck_hash_table_t *table) { + return ck_str_n_hash_lookup((const char *)&key, sizeof(double), table); +} + +int ck_double_hash_insert( + double key, + const void *value, + ck_hash_table_t *table +) { + return ck_str_n_hash_insert( + (const char *)&key, + sizeof(double), + value, + table); +} + +const void *ck_str_hash_lookup(const char *key, ck_hash_table_t *table) { + size_t keylen = strlen(key); + return ck_str_n_hash_lookup(key, keylen, table); +} + +const void *ck_str_n_hash_lookup( + const char *key, + size_t keylen, + ck_hash_table_t *table +) { + if (table->count == 0) + return NULL; + + if (keylen == 0) + return NULL; + + uint64_t hash_key = ck_hash_str(key, keylen); + hash_key %= table->capacity; + uint64_t end = hash_key; + do { + char *this_key = &table->keys[table->entries[hash_key].key_offset]; + size_t this_keylen = table->entries[hash_key].key_length; + if (this_keylen == 0) + return NULL; + if (this_keylen == keylen && memcmp(this_key, key, keylen) == 0) { + return table->entries[hash_key].value; + } + hash_key++; + hash_key %= table->capacity; + } while (hash_key != end); + return NULL; +} + +int ck_str_hash_insert( + const char *key, + const void *value, + ck_hash_table_t *table +) { + size_t keylen = strlen(key); + return ck_str_n_hash_insert(key, keylen, value, table); +} + +static int ck_hash_insert_nocopy( + off_t key_offset, + size_t keylen, + 
uint64_t hash_key, + const void *value, + ck_hash_table_t *table +) { + if (table->capacity == 0) + return 0; + + hash_key %= table->capacity; + uint64_t end = (hash_key + table->capacity - 1) % table->capacity; + while (hash_key != end) { + ck_hash_entry_t *entry = &table->entries[hash_key]; + if (table->entries[hash_key].key_length == 0) { + table->count++; + entry->key_offset = key_offset; + entry->key_length = keylen; + entry->value = value; + return 1; + } else if (entry->key_length == keylen && + entry->key_offset == key_offset) { + entry->value = value; + return 1; + } + hash_key++; + hash_key %= table->capacity; + } + return 0; +} + +int ck_str_n_hash_insert( + const char *key, + size_t keylen, + const void *value, + ck_hash_table_t *table +) { + if (table->capacity == 0) + return 0; + + if (keylen == 0) + return 0; + + if (table->count >= 0.75 * table->capacity) { + if (ck_hash_table_grow(table) == -1) { + return 0; + } + } + + uint64_t hash_key = ck_hash_str(key, keylen); + hash_key %= table->capacity; + uint64_t end = hash_key; + do { + ck_hash_entry_t *entry = &table->entries[hash_key]; + char *this_key = &table->keys[entry->key_offset]; + if (entry->key_length == 0) { + table->count++; + while (table->keys_used + keylen > table->keys_capacity) { + table->keys_capacity *= 2; + table->keys = realloc(table->keys, table->keys_capacity); + } + memcpy(table->keys + table->keys_used, key, keylen); + entry->key_offset = table->keys_used; + entry->key_length = keylen; + table->keys_used += keylen; + entry->value = value; + return 1; + } else if (entry->key_length == keylen && + memcmp(this_key, key, keylen) == 0) { + table->entries[hash_key].value = value; + return 1; + } + hash_key++; + hash_key %= table->capacity; + } while (hash_key != end); + return 0; +} + +ck_hash_table_t *ck_hash_table_init( + size_t num_entries, + size_t mean_key_length +) { + ck_hash_table_t *table; + if ((table = malloc(sizeof(ck_hash_table_t))) == NULL) + return NULL; + + if 
((table->keys = malloc(num_entries * mean_key_length)) == NULL) { + free(table); + return NULL; + } + table->keys_capacity = num_entries * mean_key_length; + + num_entries *= 2; + + if ((table->entries = malloc( + num_entries * sizeof(ck_hash_entry_t))) == NULL + ) { + free(table->keys); + free(table); + return NULL; + } + table->capacity = num_entries; + ck_hash_table_wipe(table); + return table; +} + +void ck_hash_table_free(ck_hash_table_t *table) { + free(table->entries); + if (table->keys) + free(table->keys); + free(table); +} + +void ck_hash_table_wipe(ck_hash_table_t *table) { + table->keys_used = 0; + table->count = 0; + memset(table->entries, 0, table->capacity * sizeof(ck_hash_entry_t)); +} + +int ck_hash_table_grow(ck_hash_table_t *table) { + ck_hash_entry_t *old_entries = table->entries; + uint64_t old_capacity = table->capacity; + uint64_t new_capacity = 2 * table->capacity; + if ((table->entries = calloc( + new_capacity, + sizeof(ck_hash_entry_t))) == NULL + ) { + return -1; + } + table->capacity = new_capacity; + table->count = 0; + for (unsigned int i = 0; i < old_capacity; i++) { + if (old_entries[i].key_length != 0) { + char *this_key = &table->keys[old_entries[i].key_offset]; + uint64_t hash_key = ck_hash_str( + this_key, + old_entries[i].key_length); + if (!ck_hash_insert_nocopy( + old_entries[i].key_offset, + old_entries[i].key_length, + hash_key, + old_entries[i].value, table) + ) + return -1; + } + } + free(old_entries); + return 0; +} diff --git a/pandas/_libs/src/librdata/CKHashTable.h b/pandas/_libs/src/librdata/CKHashTable.h new file mode 100644 index 0000000000000..17190e02e3521 --- /dev/null +++ b/pandas/_libs/src/librdata/CKHashTable.h @@ -0,0 +1,55 @@ +// CKHashTable - A simple hash table +// Copyright 2010-2020 Evan Miller (see LICENSE) + +#ifndef PANDAS__LIBS_SRC_LIBRDATA_CKHASHTABLE_H_ +#define PANDAS__LIBS_SRC_LIBRDATA_CKHASHTABLE_H_ + +#include <sys/types.h> +#include <stdint.h> + +typedef struct ck_hash_entry_s { + off_t 
key_offset; + size_t key_length; + const void *value; +} ck_hash_entry_t; + +typedef struct ck_hash_table_s { + size_t capacity; + size_t count; + ck_hash_entry_t *entries; + char *keys; + size_t keys_used; + size_t keys_capacity; +} ck_hash_table_t; + +int ck_str_hash_insert( + const char *key, const void *value, ck_hash_table_t *table +); +const void *ck_str_hash_lookup(const char *key, ck_hash_table_t *table); + +int ck_str_n_hash_insert( + const char *key, size_t keylen, const void *value, ck_hash_table_t *table +); +const void *ck_str_n_hash_lookup( + const char *key, size_t keylen, ck_hash_table_t *table +); + +int ck_float_hash_insert( + float key, const void *value, ck_hash_table_t *table +); +const void *ck_float_hash_lookup(float key, ck_hash_table_t *table); + +int ck_double_hash_insert( + double key, const void *value, ck_hash_table_t *table +); +const void *ck_double_hash_lookup(double key, ck_hash_table_t *table); + +ck_hash_table_t *ck_hash_table_init( + size_t num_entries, size_t mean_key_length +); +void ck_hash_table_wipe(ck_hash_table_t *table); +int ck_hash_table_grow(ck_hash_table_t *table); +void ck_hash_table_free(ck_hash_table_t *table); +uint64_t ck_hash_str(const char *str, size_t keylen); + +#endif // PANDAS__LIBS_SRC_LIBRDATA_CKHASHTABLE_H_ diff --git a/pandas/_libs/src/librdata/rdata.h b/pandas/_libs/src/librdata/rdata.h new file mode 100644 index 0000000000000..216c2cbab11d0 --- /dev/null +++ b/pandas/_libs/src/librdata/rdata.h @@ -0,0 +1,257 @@ +/* +Copyright (c) 2020 Evan Miller +*/ + +#ifndef PANDAS__LIBS_SRC_LIBRDATA_RDATA_H_ +#define PANDAS__LIBS_SRC_LIBRDATA_RDATA_H_ + +#include <stdint.h> +#include <sys/types.h> +#include <stdio.h> +#include <time.h> + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum rdata_type_e { + RDATA_TYPE_STRING, + RDATA_TYPE_INT32, + RDATA_TYPE_REAL, + RDATA_TYPE_LOGICAL, + RDATA_TYPE_TIMESTAMP, + RDATA_TYPE_DATE +} rdata_type_t; + +typedef enum rdata_error_e { + RDATA_OK, + RDATA_ERROR_OPEN = 1, 
+ RDATA_ERROR_SEEK, + RDATA_ERROR_READ, + RDATA_ERROR_MALLOC, + RDATA_ERROR_USER_ABORT, + RDATA_ERROR_PARSE, + RDATA_ERROR_WRITE, + RDATA_ERROR_FACTOR, + RDATA_ERROR_UNSUPPORTED_COMPRESSION, + RDATA_ERROR_UNSUPPORTED_CHARSET, + RDATA_ERROR_CONVERT, + RDATA_ERROR_CONVERT_BAD_STRING, + RDATA_ERROR_CONVERT_LONG_STRING, + RDATA_ERROR_CONVERT_SHORT_STRING, + RDATA_ERROR_UNSUPPORTED_S_EXPRESSION, + RDATA_ERROR_UNSUPPORTED_STORAGE_CLASS +} rdata_error_t; + +typedef enum rdata_file_format_e { + RDATA_WORKSPACE, + RDATA_SINGLE_OBJECT +} rdata_file_format_t; + +const char *rdata_error_message(rdata_error_t error_code); + +typedef int (*rdata_column_handler)(const char *name, rdata_type_t type, + void *data, long count, void *ctx); +typedef int (*rdata_table_handler)(const char *name, void *ctx); +typedef int (*rdata_text_value_handler)( + const char *value, int index, void *ctx +); +typedef int (*rdata_column_name_handler)( + const char *value, int index, void *ctx +); +typedef void (*rdata_error_handler)(const char *error_message, void *ctx); +typedef int (*rdata_progress_handler)(double progress, void *ctx); + +#if defined(_MSC_VER) +#include <BaseTsd.h> +typedef SSIZE_T ssize_t; +typedef __int64 rdata_off_t; +#elif defined _WIN32 || defined __CYGWIN__ +typedef _off64_t rdata_off_t; +#elif defined _AIX +typedef off64_t rdata_off_t; +#else +typedef off_t rdata_off_t; +#endif + +typedef enum rdata_io_flags_e { + RDATA_SEEK_SET, + RDATA_SEEK_CUR, + RDATA_SEEK_END +} rdata_io_flags_t; + +typedef int (*rdata_open_handler)(const char *path, void *io_ctx); +typedef int (*rdata_close_handler)(void *io_ctx); +typedef rdata_off_t (*rdata_seek_handler)( + rdata_off_t offset, rdata_io_flags_t whence, void *io_ctx +); +typedef ssize_t (*rdata_read_handler)(void *buf, size_t nbyte, void *io_ctx); +typedef rdata_error_t (*rdata_update_handler)( + long file_size, + rdata_progress_handler progress_handler, + void *user_ctx, + void *io_ctx +); + +typedef struct rdata_io_s { + 
rdata_open_handler open; + rdata_close_handler close; + rdata_seek_handler seek; + rdata_read_handler read; + rdata_update_handler update; + void *io_ctx; + int external_io; +} rdata_io_t; + +typedef struct rdata_parser_s { + rdata_table_handler table_handler; + rdata_column_handler column_handler; + rdata_column_name_handler column_name_handler; + rdata_column_name_handler row_name_handler; + rdata_text_value_handler text_value_handler; + rdata_text_value_handler value_label_handler; + rdata_column_handler dim_handler; + rdata_text_value_handler dim_name_handler; + rdata_error_handler error_handler; + rdata_io_t *io; +} rdata_parser_t; + +rdata_parser_t *rdata_parser_init(void); +void rdata_parser_free(rdata_parser_t *parser); + +rdata_error_t rdata_set_table_handler( + rdata_parser_t *parser, rdata_table_handler table_handler +); +rdata_error_t rdata_set_column_handler( + rdata_parser_t *parser, rdata_column_handler column_handler +); +rdata_error_t rdata_set_column_name_handler( + rdata_parser_t *parser, rdata_column_name_handler column_name_handler +); +rdata_error_t rdata_set_row_name_handler( + rdata_parser_t *parser, rdata_column_name_handler row_name_handler +); +rdata_error_t rdata_set_text_value_handler( + rdata_parser_t *parser, rdata_text_value_handler text_value_handler +); +rdata_error_t rdata_set_value_label_handler( + rdata_parser_t *parser, rdata_text_value_handler value_label_handler +); +rdata_error_t rdata_set_dim_handler( + rdata_parser_t *parser, rdata_column_handler dim_handler +); +rdata_error_t rdata_set_dim_name_handler( + rdata_parser_t *parser, rdata_text_value_handler dim_name_handler +); +rdata_error_t rdata_set_error_handler( + rdata_parser_t *parser, rdata_error_handler error_handler +); +rdata_error_t rdata_set_open_handler( + rdata_parser_t *parser, rdata_open_handler open_handler +); +rdata_error_t rdata_set_close_handler( + rdata_parser_t *parser, rdata_close_handler close_handler +); +rdata_error_t rdata_set_seek_handler( + 
rdata_parser_t *parser, rdata_seek_handler seek_handler +); +rdata_error_t rdata_set_read_handler( + rdata_parser_t *parser, rdata_read_handler read_handler +); +rdata_error_t rdata_set_update_handler( + rdata_parser_t *parser, rdata_update_handler update_handler); +rdata_error_t rdata_set_io_ctx(rdata_parser_t *parser, void *io_ctx); + +/* rdata_parse works on RData and RDS. The table handler will be called once + * per data frame in RData files, and zero times on RDS files. */ + +rdata_error_t rdata_parse( + rdata_parser_t *parser, const char *filename, void *user_ctx +); + + +// Write API +typedef ssize_t (*rdata_data_writer)(const void *data, size_t len, void *ctx); + +typedef struct rdata_column_s { + rdata_type_t type; + int index; + char name[256]; + char label[1024]; + + int32_t factor_count; + char **factor; +} rdata_column_t; + +typedef struct rdata_writer_s { + rdata_file_format_t file_format; + rdata_data_writer data_writer; + size_t bytes_written; + + rdata_error_handler error_handler; + void *user_ctx; + + void *atom_table; + int bswap; + + rdata_column_t **columns; + int32_t columns_count; + int32_t columns_capacity; +} rdata_writer_t; + +rdata_writer_t *rdata_writer_init( + rdata_data_writer write_callback, rdata_file_format_t format +); +void rdata_writer_free(rdata_writer_t *writer); + +rdata_column_t *rdata_add_column( + rdata_writer_t *writer, const char *name, rdata_type_t type +); + +rdata_error_t rdata_column_set_label( + rdata_column_t *column, const char *label +); +rdata_error_t rdata_column_add_factor( + rdata_column_t *column, const char *factor); + +rdata_column_t *rdata_get_column(rdata_writer_t *writer, int32_t j +); + +rdata_error_t rdata_begin_file(rdata_writer_t *writer, void *ctx); +rdata_error_t rdata_begin_table( + rdata_writer_t *writer, const char *variable_name); +rdata_error_t rdata_begin_column( + rdata_writer_t *writer, rdata_column_t *column, int32_t row_count +); + +rdata_error_t rdata_append_real_value( + rdata_writer_t 
*writer, double value +); +rdata_error_t rdata_append_int32_value( + rdata_writer_t *writer, int32_t value +); +rdata_error_t rdata_append_timestamp_value( + rdata_writer_t *writer, time_t value +); +rdata_error_t rdata_append_date_value( + rdata_writer_t *writer, struct tm *value +); +rdata_error_t rdata_append_logical_value( + rdata_writer_t *writer, int value); +rdata_error_t rdata_append_string_value( + rdata_writer_t *writer, const char *value +); + +rdata_error_t rdata_end_column( + rdata_writer_t *writer, rdata_column_t *column +); +rdata_error_t rdata_end_table( + rdata_writer_t *writer, int32_t row_count, const char *datalabel +); +rdata_error_t rdata_end_file(rdata_writer_t *writer); + +#ifdef __cplusplus +} // extern c block +#endif + +#endif // PANDAS__LIBS_SRC_LIBRDATA_RDATA_H_ diff --git a/pandas/_libs/src/librdata/rdata_bits.c b/pandas/_libs/src/librdata/rdata_bits.c new file mode 100644 index 0000000000000..dd308d0e5002f --- /dev/null +++ b/pandas/_libs/src/librdata/rdata_bits.c @@ -0,0 +1,52 @@ +/* +Copyright (c) 2020 Evan Miller +*/ + +// +// readstat_bits.c - Bit-twiddling utility functions +// + +#include <sys/types.h> +#include <stdint.h> +#include <string.h> + +#include "rdata_bits.h" + +int machine_is_little_endian() { + int test_byte_order = 1; + return ((char *)&test_byte_order)[0]; +} + +uint16_t byteswap2(uint16_t num) { + return ((num & 0xFF00) >> 8) | ((num & 0x00FF) << 8); +} + +uint32_t byteswap4(uint32_t num) { + num = ((num & 0xFFFF0000) >> 16) | ((num & 0x0000FFFF) << 16); + return ((num & 0xFF00FF00) >> 8) | ((num & 0x00FF00FF) << 8); +} + +uint64_t byteswap8(uint64_t num) { + num = ((num & 0xFFFFFFFF00000000) >> 32) | + ((num & 0x00000000FFFFFFFF) << 32); + num = ((num & 0xFFFF0000FFFF0000) >> 16) | + ((num & 0x0000FFFF0000FFFF) << 16); + return ((num & 0xFF00FF00FF00FF00) >> 8) | + ((num & 0x00FF00FF00FF00FF) << 8); +} + +float byteswap_float(float num) { + uint32_t answer = 0; + memcpy(&answer, &num, 4); + answer = 
byteswap4(answer); + memcpy(&num, &answer, 4); + return num; +} + +double byteswap_double(double num) { + uint64_t answer = 0; + memcpy(&answer, &num, 8); + answer = byteswap8(answer); + memcpy(&num, &answer, 8); + return num; +} diff --git a/pandas/_libs/src/librdata/rdata_bits.h b/pandas/_libs/src/librdata/rdata_bits.h new file mode 100644 index 0000000000000..1bd6493dfb230 --- /dev/null +++ b/pandas/_libs/src/librdata/rdata_bits.h @@ -0,0 +1,21 @@ +/* +Copyright (c) 2020 Evan Miller +*/ + +// +// rdata_bit.h - Bit-twiddling utility functions +// + +#ifndef PANDAS__LIBS_SRC_LIBRDATA_RDATA_BITS_H_ +#define PANDAS__LIBS_SRC_LIBRDATA_RDATA_BITS_H_ + +int machine_is_little_endian(void); + +uint16_t byteswap2(uint16_t num); +uint32_t byteswap4(uint32_t num); +uint64_t byteswap8(uint64_t num); + +float byteswap_float(float num); +double byteswap_double(double num); + +#endif // PANDAS__LIBS_SRC_LIBRDATA_RDATA_BITS_H_ diff --git a/pandas/_libs/src/librdata/rdata_error.c b/pandas/_libs/src/librdata/rdata_error.c new file mode 100644 index 0000000000000..5a5cabc1f55b7 --- /dev/null +++ b/pandas/_libs/src/librdata/rdata_error.c @@ -0,0 +1,64 @@ +/* +Copyright (c) 2020 Evan Miller +*/ + +#include "rdata.h" + +const char *rdata_error_message(rdata_error_t error_code) { + if (error_code == RDATA_OK) + return NULL; + + if (error_code == RDATA_ERROR_OPEN) + return "Unable to open file"; + + if (error_code == RDATA_ERROR_SEEK) + return "Unable to seek within file"; + + if (error_code == RDATA_ERROR_READ) + return "Unable to read from file"; + + if (error_code == RDATA_ERROR_MALLOC) + return "Unable to allocate memory"; + + if (error_code == RDATA_ERROR_USER_ABORT) + return "The parsing was aborted (callback returned non-zero value)"; + + if (error_code == RDATA_ERROR_PARSE) + return "Invalid file, or file has unsupported features"; + + if (error_code == RDATA_ERROR_WRITE) + return "Unable to write to file"; + + if (error_code == RDATA_ERROR_FACTOR) + return "The provided column 
does not support factors"; + + if (error_code == RDATA_ERROR_UNSUPPORTED_COMPRESSION) + return "The file is compressed using an unsupported " + "compression scheme"; + + if (error_code == RDATA_ERROR_UNSUPPORTED_CHARSET) + return "File has an unsupported character set"; + + if (error_code == RDATA_ERROR_CONVERT) + return "Unable to convert string to the requested encoding"; + + if (error_code == RDATA_ERROR_CONVERT_BAD_STRING) + return "Unable to convert string to the requested " + "encoding (invalid byte sequence)"; + + if (error_code == RDATA_ERROR_CONVERT_SHORT_STRING) + return "Unable to convert string to the requested " + "encoding (incomplete byte sequence)"; + + if (error_code == RDATA_ERROR_CONVERT_LONG_STRING) + return "Unable to convert string to the requested " + "encoding (output buffer too small)"; + + if (error_code == RDATA_ERROR_UNSUPPORTED_S_EXPRESSION) + return "The file contains an unrecognized object"; + + if (error_code == RDATA_ERROR_UNSUPPORTED_STORAGE_CLASS) + return "The file contains an unrecognized object"; + + return "Unknown error"; +} diff --git a/pandas/_libs/src/librdata/rdata_internal.h b/pandas/_libs/src/librdata/rdata_internal.h new file mode 100644 index 0000000000000..ba1ba11c91f78 --- /dev/null +++ b/pandas/_libs/src/librdata/rdata_internal.h @@ -0,0 +1,89 @@ +/* +Copyright (c) 2020 Evan Miller +*/ + +// +// rdata_internal.h +// + +#ifndef PANDAS__LIBS_SRC_LIBRDATA_RDATA_INTERNAL_H_ +#define PANDAS__LIBS_SRC_LIBRDATA_RDATA_INTERNAL_H_ + +#include "rdata_bits.h" + +#pragma pack(push, 1) + +typedef struct rdata_v2_header_s { + char header[2]; + uint32_t format_version; + uint32_t writer_version; + uint32_t reader_version; +} rdata_v2_header_t; + +typedef struct rdata_sexptype_header_s { + unsigned int type:8; + unsigned int object:1; + unsigned int attributes:1; + unsigned int tag:1; + unsigned int unused:1; + unsigned int gp:16; + unsigned int padding:4; +} rdata_sexptype_header_t; + +typedef struct rdata_sexptype_info_s { + 
rdata_sexptype_header_t header; + int32_t attributes; + int32_t tag; + int32_t ref; +} rdata_sexptype_info_t; + +#pragma pack(pop) + +#define RDATA_SEXPTYPE_NIL 0 +#define RDATA_SEXPTYPE_SYMBOL 1 +#define RDATA_SEXPTYPE_PAIRLIST 2 +#define RDATA_SEXPTYPE_CLOSURE 3 +#define RDATA_SEXPTYPE_ENVIRONMENT 4 +#define RDATA_SEXPTYPE_PROMISE 5 +#define RDATA_SEXPTYPE_LANGUAGE_OBJECT 6 +#define RDATA_SEXPTYPE_SPECIAL_FUNCTION 7 +#define RDATA_SEXPTYPE_BUILTIN_FUNCTION 8 +#define RDATA_SEXPTYPE_CHARACTER_STRING 9 +#define RDATA_SEXPTYPE_LOGICAL_VECTOR 10 +#define RDATA_SEXPTYPE_INTEGER_VECTOR 13 +#define RDATA_SEXPTYPE_REAL_VECTOR 14 +#define RDATA_SEXPTYPE_COMPLEX_VECTOR 15 +#define RDATA_SEXPTYPE_CHARACTER_VECTOR 16 +#define RDATA_SEXPTYPE_DOT_DOT_DOT 17 +#define RDATA_SEXPTYPE_ANY 18 +#define RDATA_SEXPTYPE_GENERIC_VECTOR 19 +#define RDATA_SEXPTYPE_EXPRESSION_VECTOR 20 +#define RDATA_SEXPTYPE_BYTE_CODE 21 +#define RDATA_SEXPTYPE_EXTERNAL_POINTER 22 +#define RDATA_SEXPTYPE_WEAK_REFERENCE 23 +#define RDATA_SEXPTYPE_RAW_VECTOR 24 +#define RDATA_SEXPTYPE_S4_CLASS 25 + +#define RDATA_SEXPTYPE_FUN 99 + +#define RDATA_PSEUDO_SXP_REF 255 +#define RDATA_PSEUDO_SXP_NIL 254 +#define RDATA_PSEUDO_SXP_GLOBAL_ENVIRONMENT 253 +#define RDATA_PSEUDO_SXP_UNBOUND_VALUE 252 +#define RDATA_PSEUDO_SXP_MISSING_ARGUMENT 251 +#define RDATA_PSEUDO_SXP_BASE_NAMESPACE 250 +#define RDATA_PSEUDO_SXP_NAMESPACE 249 +#define RDATA_PSEUDO_SXP_PACKAGE 248 +#define RDATA_PSEUDO_SXP_PERSIST 247 +#define RDATA_PSEUDO_SXP_CLASS_REF 246 +#define RDATA_PSEUDO_SXP_GENERIC_REF 245 +#define RDATA_PSEUDO_SXP_BYTE_CODE_REP_DEF 244 +#define RDATA_PSEUDO_SXP_BYTE_CODE_REP_REF 243 +#define RDATA_PSEUDO_SXP_EMPTY_ENVIRONMENT 242 +#define RDATA_PSEUDO_SXP_BASE_ENVIRONMENT 241 + +#define RDATA_SEXPTYPE_LANGUAGE_OBJECT_ATTR 240 +#define RDATA_SEXPTYPE_PAIRLIST_ATTR 239 +#define RDATA_PSEUDO_SXP_ALTREP 238 + +#endif // PANDAS__LIBS_SRC_LIBRDATA_RDATA_INTERNAL_H_ diff --git a/pandas/_libs/src/librdata/rdata_io_unistd.c 
b/pandas/_libs/src/librdata/rdata_io_unistd.c new file mode 100644 index 0000000000000..118eb4a64a968 --- /dev/null +++ b/pandas/_libs/src/librdata/rdata_io_unistd.c @@ -0,0 +1,101 @@ +/* +Copyright (c) 2020 Evan Miller +*/ + +#include <fcntl.h> +#include <stdlib.h> +#if defined _WIN32 || defined __CYGWIN__ + #include <io.h> +#else + #include <unistd.h> +#endif + + +#include "rdata.h" +#include "rdata_io_unistd.h" + +#if defined _WIN32 || defined __CYGWIN__ +#define UNISTD_OPEN_OPTIONS O_RDONLY | O_BINARY +#elif defined _AIX +#define UNISTD_OPEN_OPTIONS O_RDONLY | O_LARGEFILE +#else +#define UNISTD_OPEN_OPTIONS O_RDONLY +#endif + +#if defined _WIN32 || defined _AIX +#define lseek lseek +#endif + + +int rdata_unistd_open_handler(const char *path, void *io_ctx) { + int fd = open(path, UNISTD_OPEN_OPTIONS); + ((rdata_unistd_io_ctx_t*) io_ctx)->fd = fd; + return fd; +} + +int rdata_unistd_close_handler(void *io_ctx) { + int fd = ((rdata_unistd_io_ctx_t*) io_ctx)->fd; + if (fd != -1) + return close(fd); + else + return 0; +} + +rdata_off_t rdata_unistd_seek_handler( + rdata_off_t offset, + rdata_io_flags_t whence, + void *io_ctx +) { + int flag = 0; + switch (whence) { + case RDATA_SEEK_SET: + flag = SEEK_SET; + break; + case RDATA_SEEK_CUR: + flag = SEEK_CUR; + break; + case RDATA_SEEK_END: + flag = SEEK_END; + break; + default: + return -1; + } + int fd = ((rdata_unistd_io_ctx_t*) io_ctx)->fd; + return lseek(fd, offset, flag); +} + +ssize_t rdata_unistd_read_handler(void *buf, size_t nbyte, void *io_ctx) { + int fd = ((rdata_unistd_io_ctx_t*) io_ctx)->fd; + ssize_t out = read(fd, buf, nbyte); + return out; +} + +rdata_error_t rdata_unistd_update_handler(long file_size, + rdata_progress_handler progress_handler, void *user_ctx, + void *io_ctx) { + if (!progress_handler) + return RDATA_OK; + + int fd = ((rdata_unistd_io_ctx_t*) io_ctx)->fd; + long current_offset = lseek(fd, 0, SEEK_CUR); + + if (current_offset == -1) + return RDATA_ERROR_SEEK; + + if 
(progress_handler(1.0 * current_offset / file_size, user_ctx)) + return RDATA_ERROR_USER_ABORT; + + return RDATA_OK; +} + +void rdata_unistd_io_init(rdata_parser_t *parser) { + rdata_set_open_handler(parser, rdata_unistd_open_handler); + rdata_set_close_handler(parser, rdata_unistd_close_handler); + rdata_set_seek_handler(parser, rdata_unistd_seek_handler); + rdata_set_read_handler(parser, rdata_unistd_read_handler); + rdata_set_update_handler(parser, rdata_unistd_update_handler); + + rdata_unistd_io_ctx_t *io_ctx = calloc(1, sizeof(rdata_unistd_io_ctx_t)); + io_ctx->fd = -1; + rdata_set_io_ctx(parser, (void*) io_ctx); +} diff --git a/pandas/_libs/src/librdata/rdata_io_unistd.h b/pandas/_libs/src/librdata/rdata_io_unistd.h new file mode 100644 index 0000000000000..661010c76c4aa --- /dev/null +++ b/pandas/_libs/src/librdata/rdata_io_unistd.h @@ -0,0 +1,26 @@ +/* +Copyright (c) 2020 Evan Miller +*/ + +#ifndef PANDAS__LIBS_SRC_LIBRDATA_RDATA_IO_UNISTD_H_ +#define PANDAS__LIBS_SRC_LIBRDATA_RDATA_IO_UNISTD_H_ + +typedef struct rdata_unistd_io_ctx_s { + int fd; +} rdata_unistd_io_ctx_t; + +int rdata_unistd_open_handler(const char *path, void *io_ctx); +int rdata_unistd_close_handler(void *io_ctx); +rdata_off_t rdata_unistd_seek_handler( + rdata_off_t offset, rdata_io_flags_t whence, void *io_ctx +); +ssize_t rdata_unistd_read_handler(void *buf, size_t nbytes, void *io_ctx); +rdata_error_t rdata_unistd_update_handler( + long file_size, + rdata_progress_handler progress_handler, + void *user_ctx, + void *io_ctx +); +void rdata_unistd_io_init(rdata_parser_t *parser); + +#endif // PANDAS__LIBS_SRC_LIBRDATA_RDATA_IO_UNISTD_H_ diff --git a/pandas/_libs/src/librdata/rdata_parser.c b/pandas/_libs/src/librdata/rdata_parser.c new file mode 100644 index 0000000000000..5d948a449fba3 --- /dev/null +++ b/pandas/_libs/src/librdata/rdata_parser.c @@ -0,0 +1,147 @@ +/* +Copyright (c) 2020 Evan Miller +*/ + +#include <stdlib.h> +#include "rdata.h" +#include "rdata_io_unistd.h" + 
+rdata_parser_t *rdata_parser_init() { + rdata_parser_t *parser = calloc(1, sizeof(rdata_parser_t)); + parser->io = calloc(1, sizeof(rdata_io_t)); + rdata_unistd_io_init(parser); + return parser; +} + +void rdata_parser_free(rdata_parser_t *parser) { + if (parser) { + if (parser->io) + free(parser->io); + free(parser); + } +} + +rdata_error_t rdata_set_table_handler( + rdata_parser_t *parser, + rdata_table_handler table_handler +) { + parser->table_handler = table_handler; + return RDATA_OK; +} + +rdata_error_t rdata_set_column_handler( + rdata_parser_t *parser, + rdata_column_handler column_handler +) { + parser->column_handler = column_handler; + return RDATA_OK; +} + +rdata_error_t rdata_set_column_name_handler( + rdata_parser_t *parser, + rdata_column_name_handler column_name_handler +) { + parser->column_name_handler = column_name_handler; + return RDATA_OK; +} + +rdata_error_t rdata_set_row_name_handler( + rdata_parser_t *parser, + rdata_column_name_handler row_name_handler +) { + parser->row_name_handler = row_name_handler; + return RDATA_OK; +} + +rdata_error_t rdata_set_text_value_handler( + rdata_parser_t *parser, + rdata_text_value_handler text_value_handler +) { + parser->text_value_handler = text_value_handler; + return RDATA_OK; +} + +rdata_error_t rdata_set_value_label_handler( + rdata_parser_t *parser, + rdata_text_value_handler value_label_handler +) { + parser->value_label_handler = value_label_handler; + return RDATA_OK; +} + +rdata_error_t rdata_set_dim_handler( + rdata_parser_t *parser, + rdata_column_handler dim_handler +) { + parser->dim_handler = dim_handler; + return RDATA_OK; +} + +rdata_error_t rdata_set_dim_name_handler( + rdata_parser_t *parser, + rdata_text_value_handler dim_name_handler +) { + parser->dim_name_handler = dim_name_handler; + return RDATA_OK; +} + +rdata_error_t rdata_set_error_handler( + rdata_parser_t *parser, + rdata_error_handler error_handler +) { + parser->error_handler = error_handler; + return RDATA_OK; +} + 
+rdata_error_t rdata_set_open_handler( + rdata_parser_t *parser, + rdata_open_handler open_handler +) { + parser->io->open = open_handler; + return RDATA_OK; +} + +rdata_error_t rdata_set_close_handler( + rdata_parser_t *parser, + rdata_close_handler close_handler +) { + parser->io->close = close_handler; + return RDATA_OK; +} + +rdata_error_t rdata_set_seek_handler( + rdata_parser_t *parser, + rdata_seek_handler seek_handler +) { + parser->io->seek = seek_handler; + return RDATA_OK; +} + +rdata_error_t rdata_set_read_handler( + rdata_parser_t *parser, + rdata_read_handler read_handler +) { + parser->io->read = read_handler; + return RDATA_OK; +} + +rdata_error_t rdata_set_update_handler( + rdata_parser_t *parser, + rdata_update_handler update_handler +) { + parser->io->update = update_handler; + return RDATA_OK; +} + +rdata_error_t rdata_set_io_ctx( + rdata_parser_t *parser, + void *io_ctx +) { + if (!parser->io->external_io) + free(parser->io->io_ctx); + + parser->io->io_ctx = io_ctx; + parser->io->external_io = 1; + + return RDATA_OK; +} diff --git a/pandas/_libs/src/librdata/rdata_read.c b/pandas/_libs/src/librdata/rdata_read.c new file mode 100644 index 0000000000000..dbc165a2273dc --- /dev/null +++ b/pandas/_libs/src/librdata/rdata_read.c @@ -0,0 +1,2172 @@ +/* +Copyright (c) 2020 Evan Miller +*/ + +// +// rdata_rdata.c +// + +#include <stdlib.h> +#include <string.h> +#include <stdint.h> +#include <sys/types.h> +#include <stdio.h> +#include <math.h> +#include <limits.h> + +#if defined(_WIN32) +#include "win_iconv.h" +#elif __linux__ +#include "unix_iconv.h" +#else +#include <iconv.h> +#endif + +#include <errno.h> +#include <stdbool.h> + +#if HAVE_BZIP2 +#include <bzlib.h> +#endif + +#if HAVE_APPLE_COMPRESSION +#include <compression.h> +#endif + +#if HAVE_ZLIB +#include <zlib.h> +#endif + +#if HAVE_LZMA +#include <lzma.h> +#endif + +#include "rdata.h" +#include "rdata_internal.h" + +#define RDATA_CLASS_POSIXCT 0x01 +#define RDATA_CLASS_DATE 0x02 + +#define 
STREAM_BUFFER_SIZE 65536 +#define MAX_ARRAY_DIMENSIONS 3 + +/* ICONV_CONST defined by autotools during configure according + * to the current platform. Some people copy-paste the source code, so + * provide some fallback logic */ +#ifndef ICONV_CONST +#define ICONV_CONST +#endif + +typedef struct rdata_atom_table_s { + int count; + char **data; +} rdata_atom_table_t; + +typedef struct rdata_ctx_s { + int machine_needs_byteswap; + rdata_table_handler table_handler; + rdata_column_handler column_handler; + rdata_column_name_handler column_name_handler; + rdata_column_name_handler row_name_handler; + rdata_text_value_handler text_value_handler; + rdata_text_value_handler value_label_handler; + rdata_column_handler dim_handler; + rdata_text_value_handler dim_name_handler; + rdata_error_handler error_handler; + void *user_ctx; +#if HAVE_BZIP2 + bz_stream *bz_strm; +#endif +#if HAVE_APPLE_COMPRESSION + compression_stream *compression_strm; +#endif +#if HAVE_ZLIB + z_stream *z_strm; +#endif +#if HAVE_LZMA + lzma_stream *lzma_strm; +#endif + void *strm_buffer; + rdata_io_t *io; + size_t bytes_read; + + rdata_atom_table_t *atom_table; + unsigned int column_class; + + iconv_t converter; + + int32_t dims[MAX_ARRAY_DIMENSIONS]; + bool is_dimnames; +} rdata_ctx_t; + +static int atom_table_add(rdata_atom_table_t *table, char *key); +static char *atom_table_lookup(rdata_atom_table_t *table, int index); + +static rdata_error_t read_environment( + const char *table_name, + rdata_ctx_t *ctx); +static rdata_error_t read_toplevel_object( + const char *table_name, + const char *key, + rdata_ctx_t *ctx); +static rdata_error_t read_sexptype_header( + rdata_sexptype_info_t *header, + rdata_ctx_t *ctx); +static rdata_error_t read_length( + int32_t *outLength, + rdata_ctx_t *ctx); +static rdata_error_t read_string_vector_n( + int attributes, + int32_t length, + rdata_text_value_handler text_value_handler, + void *callback_ctx, + rdata_ctx_t *ctx); +static rdata_error_t read_string_vector( + 
int attributes, + rdata_text_value_handler text_value_handler, + void *callback_ctx, + rdata_ctx_t *ctx); +static rdata_error_t read_value_vector( + rdata_sexptype_header_t header, + const char *name, + rdata_ctx_t *ctx); +static rdata_error_t read_value_vector_cb( + rdata_sexptype_header_t header, + const char *name, + rdata_column_handler column_handler, + void *user_ctx, + rdata_ctx_t *ctx); +static rdata_error_t read_character_string( + char **key, + rdata_ctx_t *ctx); +static rdata_error_t read_generic_list( + int attributes, + rdata_ctx_t *ctx); +static rdata_error_t read_altrep_vector( + const char *name, + rdata_ctx_t *ctx); +static rdata_error_t read_attributes(int (*handle_attribute)( + char *key, + rdata_sexptype_info_t val_info, + rdata_ctx_t *ctx), + rdata_ctx_t *ctx); +static rdata_error_t recursive_discard( + rdata_sexptype_header_t sexptype_header, + rdata_ctx_t *ctx); + +static void *rdata_malloc(size_t len) { + if (len == 0) + return NULL; + + return malloc(len); +} + +static void *rdata_realloc(void *buf, size_t len) { + if (len == 0) + return NULL; + + return realloc(buf, len); +} + +static int atom_table_add(rdata_atom_table_t *table, char *key) { + table->data = realloc(table->data, sizeof(char *) * (table->count + 1)); + table->data[table->count++] = strdup(key); + return table->count; +} + +static char *atom_table_lookup(rdata_atom_table_t *table, int index) { + if (index <= 0 || index > table->count) { + return NULL; + } + return table->data[(index-1)]; +} + +#if HAVE_BZIP2 +static ssize_t read_st_bzip2(rdata_ctx_t *ctx, void *buffer, size_t len) { + ssize_t bytes_written = 0; + int error = 0; + int result = BZ_OK; + while (1) { + ssize_t start_out = ctx->bz_strm->total_out_lo32 + + ((ssize_t)ctx->bz_strm->total_out_hi32 << 32LL); + + ctx->bz_strm->next_out = (char *)buffer + bytes_written; + ctx->bz_strm->avail_out = len - bytes_written; + + result = BZ2_bzDecompress(ctx->bz_strm); + + if (result != BZ_OK && result != BZ_STREAM_END) { + 
error = -1; + break; + } + + bytes_written += ctx->bz_strm->total_out_lo32 + + ((ssize_t)ctx->bz_strm->total_out_hi32 << 32LL) - start_out; + + if (result == BZ_STREAM_END) + break; + + if (ctx->bz_strm->avail_in == 0) { + int bytes_read = 0; + bytes_read = ctx->io->read( + ctx->strm_buffer, + STREAM_BUFFER_SIZE, + ctx->io->io_ctx); + if (bytes_read < 0) { + error = bytes_read; + break; + } + if (bytes_read == 0) + break; + + ctx->bz_strm->next_in = ctx->strm_buffer; + ctx->bz_strm->avail_in = bytes_read; + } + if (bytes_written == len) + break; + } + + if (error != 0) + return error; + + return bytes_written; +} +#endif /* HAVE_BZIP2 */ + +#if HAVE_APPLE_COMPRESSION +static ssize_t read_st_compression( + rdata_ctx_t *ctx, + void *buffer, + size_t len +) { + ssize_t bytes_written = 0; + int error = 0; + compression_status result = COMPRESSION_STATUS_OK; + size_t start_size = len; + + ctx->compression_strm->dst_ptr = (unsigned char *)buffer; + ctx->compression_strm->dst_size = len; + + while (1) { + start_size = ctx->compression_strm->dst_size; + + result = compression_stream_process(ctx->compression_strm, 0); + + if (result == COMPRESSION_STATUS_OK) { + bytes_written += start_size - ctx->compression_strm->dst_size; + } else { + error = -1; + break; + } + + if (ctx->compression_strm->src_size == 0) { + int bytes_read = 0; + bytes_read = ctx->io->read( + ctx->compression_strm, + STREAM_BUFFER_SIZE, + ctx->io->io_ctx); + if (bytes_read < 0) { + error = bytes_read; + break; + } + if (bytes_read == 0) { + start_size = ctx->compression_strm->dst_size; + result = compression_stream_process( + ctx->compression_strm, + COMPRESSION_STREAM_FINALIZE); + if (result == COMPRESSION_STATUS_END) { + bytes_written += ( + start_size - ctx->compression_strm->dst_size); + } else { + error = -1; + } + break; + } + + ctx->compression_strm->src_ptr = ctx->strm_buffer; + ctx->compression_strm->src_size = bytes_read; + } + if (bytes_written == len) + break; + } + + if (error != 0) + return 
error; + + return bytes_written; +} +#endif /* HAVE_APPLE_COMPRESSION */ + +#if HAVE_ZLIB +static ssize_t read_st_z(rdata_ctx_t *ctx, void *buffer, size_t len) { + ssize_t bytes_written = 0; + int error = 0; + int result = Z_OK; + while (1) { + long start_out = ctx->z_strm->total_out; + + ctx->z_strm->next_out = (unsigned char *)buffer + bytes_written; + ctx->z_strm->avail_out = len - bytes_written; + + result = inflate(ctx->z_strm, Z_SYNC_FLUSH); + + if (result != Z_OK && result != Z_STREAM_END) { + error = -1; + break; + } + + bytes_written += ctx->z_strm->total_out - start_out; + + if (result == Z_STREAM_END) + break; + + if (ctx->z_strm->avail_in == 0) { + int bytes_read = 0; + bytes_read = ctx->io->read( + ctx->strm_buffer, + STREAM_BUFFER_SIZE, + ctx->io->io_ctx); + if (bytes_read < 0) { + error = bytes_read; + break; + } + if (bytes_read == 0) + break; + + ctx->z_strm->next_in = ctx->strm_buffer; + ctx->z_strm->avail_in = bytes_read; + } + if (bytes_written == len) + break; + } + + if (error != 0) + return error; + + return bytes_written; +} +#endif /* HAVE_ZLIB */ + +#if HAVE_LZMA +static ssize_t read_st_lzma(rdata_ctx_t *ctx, void *buffer, size_t len) { + ssize_t bytes_written = 0; + int error = 0; + int result = LZMA_OK; + while (1) { + long start_out = ctx->lzma_strm->total_out; + + ctx->lzma_strm->next_out = (unsigned char *)buffer + bytes_written; + ctx->lzma_strm->avail_out = len - bytes_written; + + result = lzma_code(ctx->lzma_strm, LZMA_RUN); + + if (result != LZMA_OK && result != LZMA_STREAM_END) { + error = -1; + break; + } + + bytes_written += ctx->lzma_strm->total_out - start_out; + + if (result == LZMA_STREAM_END) + break; + + if (ctx->lzma_strm->avail_in == 0) { + int bytes_read = 0; + bytes_read = ctx->io->read( + ctx->strm_buffer, + STREAM_BUFFER_SIZE, + ctx->io->io_ctx); + if (bytes_read < 0) { + error = bytes_read; + break; + } + if (bytes_read == 0) + break; + + ctx->lzma_strm->next_in = ctx->strm_buffer; + ctx->lzma_strm->avail_in = 
bytes_read; + } + if (bytes_written == len) + break; + } + + if (error != 0) + return error; + + return bytes_written; +} +#endif /* HAVE_LZMA */ + +static ssize_t read_st(rdata_ctx_t *ctx, void *buffer, size_t len) { + ssize_t bytes_read = 0; + + if (len == 0) + return 0; + +#if HAVE_BZIP2 + if (ctx->bz_strm) { + bytes_read = read_st_bzip2(ctx, buffer, len); + } else // NOLINT +#endif +#if HAVE_APPLE_COMPRESSION + if (ctx->compression_strm) { + bytes_read = read_st_compression(ctx, buffer, len); + } else // NOLINT +#endif +#if HAVE_ZLIB + if (ctx->z_strm) { + bytes_read = read_st_z(ctx, buffer, len); + } else // NOLINT +#endif +#if HAVE_LZMA + if (ctx->lzma_strm) { + bytes_read = read_st_lzma(ctx, buffer, len); + } else // NOLINT +#endif + { + bytes_read = ctx->io->read(buffer, len, ctx->io->io_ctx); + } + + if (bytes_read > 0) { + ctx->bytes_read += bytes_read; + } + + return bytes_read; +} + +static int lseek_st(rdata_ctx_t *ctx, size_t len) { + if (0 +#if HAVE_BZIP2 + || ctx->bz_strm +#endif +#if HAVE_APPLE_COMPRESSION + || ctx->compression_strm +#endif +#if HAVE_ZLIB + || ctx->z_strm +#endif +#if HAVE_LZMA + || ctx->lzma_strm +#endif + ) { + int retval = 0; + char *buf = rdata_malloc(len); + + int result_st = read_st(ctx, buf, len); + + if (result_st > 0) { + if (buf == NULL) { + retval = -1; + } else if ((size_t)result_st != len) { + retval = -1; + } + } else { + if (buf == NULL) { + retval = -1; + } else { + retval = -1; + } + } + + if (buf) + free(buf); + + return retval; + } + + return ctx->io->seek(len, SEEK_CUR, ctx->io->io_ctx); +} + +static rdata_error_t init_bz_stream(rdata_ctx_t *ctx) { + rdata_error_t retval = RDATA_OK; + ctx->strm_buffer = malloc(STREAM_BUFFER_SIZE); + int bytes_read = ctx->io->read( + ctx->strm_buffer, + STREAM_BUFFER_SIZE, + ctx->io->io_ctx); + if (bytes_read <= 0) { + retval = RDATA_ERROR_READ; + goto cleanup; + } + +#if HAVE_BZIP2 + ctx->bz_strm = calloc(1, sizeof(bz_stream)); + ctx->bz_strm->next_in = ctx->strm_buffer; + 
ctx->bz_strm->avail_in = bytes_read; + + if (BZ2_bzDecompressInit(ctx->bz_strm, 0, 0) != BZ_OK) { + retval = RDATA_ERROR_MALLOC; + goto cleanup; + } +#else + retval = RDATA_ERROR_UNSUPPORTED_COMPRESSION; + goto cleanup; +#endif + +cleanup: + return retval; +} + +static rdata_error_t init_z_stream(rdata_ctx_t *ctx) { + rdata_error_t retval = RDATA_OK; + ctx->strm_buffer = malloc(STREAM_BUFFER_SIZE); + int bytes_read = ctx->io->read( + ctx->strm_buffer, + STREAM_BUFFER_SIZE, + ctx->io->io_ctx); + if (bytes_read <= 0) { + retval = RDATA_ERROR_READ; + goto cleanup; + } + +#if HAVE_ZLIB + ctx->z_strm = calloc(1, sizeof(z_stream)); + ctx->z_strm->next_in = ctx->strm_buffer; + ctx->z_strm->avail_in = bytes_read; + + if (inflateInit2(ctx->z_strm, (15+32)) != Z_OK) { + retval = RDATA_ERROR_MALLOC; + goto cleanup; + } +#else + retval = RDATA_ERROR_UNSUPPORTED_COMPRESSION; + goto cleanup; +#endif + +cleanup: + return retval; +} + +static rdata_error_t init_lzma_stream(rdata_ctx_t *ctx) { + rdata_error_t retval = RDATA_OK; + ctx->strm_buffer = malloc(STREAM_BUFFER_SIZE); + int bytes_read = ctx->io->read( + ctx->strm_buffer, + STREAM_BUFFER_SIZE, + ctx->io->io_ctx); + if (bytes_read <= 0) { + retval = RDATA_ERROR_READ; + goto cleanup; + } + +#if HAVE_APPLE_COMPRESSION + ctx->compression_strm = calloc(1, sizeof(compression_stream)); + + if (compression_stream_init( + ctx->compression_strm, + COMPRESSION_STREAM_DECODE, + COMPRESSION_LZMA) == COMPRESSION_STATUS_ERROR + ) { + retval = RDATA_ERROR_MALLOC; + goto cleanup; + } + + ctx->compression_strm->src_ptr = ctx->strm_buffer; + ctx->compression_strm->src_size = bytes_read; +#elif HAVE_LZMA + ctx->lzma_strm = calloc(1, sizeof(lzma_stream)); + + if (lzma_stream_decoder(ctx->lzma_strm, UINT64_MAX, 0) != LZMA_OK) { + retval = RDATA_ERROR_MALLOC; + goto cleanup; + } + + ctx->lzma_strm->next_in = ctx->strm_buffer; + ctx->lzma_strm->avail_in = bytes_read; +#else + retval = RDATA_ERROR_UNSUPPORTED_COMPRESSION; + goto cleanup; +#endif + 
+cleanup: + return retval; +} + +static rdata_error_t init_stream(rdata_ctx_t *ctx) { + rdata_error_t retval = RDATA_OK; + char header[5]; + + if (ctx->io->read( + &header, + sizeof(header), + ctx->io->io_ctx) != sizeof(header) + ) { + retval = RDATA_ERROR_READ; + goto cleanup; + } + + if (ctx->io->seek(0, SEEK_SET, ctx->io->io_ctx) == -1) { + retval = RDATA_ERROR_SEEK; + goto cleanup; + } + + if (header[0] == 'B' && header[1] == 'Z' && header[2] == 'h' && + header[3] >= '0' && header[3] <= '9') { + return init_bz_stream(ctx); + } + if (header[0] == '\x1f' && header[1] == '\x8b') { + return init_z_stream(ctx); + } + if (strncmp("\xFD" "7zXZ", header, sizeof(header)) == 0) { + return init_lzma_stream(ctx); + } + +cleanup: + return retval; +} + +static rdata_error_t reset_stream(rdata_ctx_t *ctx) { +#if HAVE_BZIP2 + if (ctx->bz_strm) { + BZ2_bzDecompressEnd(ctx->bz_strm); + free(ctx->bz_strm); + ctx->bz_strm = NULL; + } +#endif +#if HAVE_APPLE_COMPRESSION + if (ctx->compression_strm) { + compression_stream_destroy(ctx->compression_strm); + free(ctx->compression_strm); + ctx->compression_strm = NULL; + } +#endif +#if HAVE_ZLIB + if (ctx->z_strm) { + inflateEnd(ctx->z_strm); + free(ctx->z_strm); + ctx->z_strm = NULL; + } +#endif +#if HAVE_LZMA + if (ctx->lzma_strm) { + lzma_end(ctx->lzma_strm); + free(ctx->lzma_strm); + ctx->lzma_strm = NULL; + } +#endif + + if (ctx->io->seek(0, SEEK_SET, ctx->io->io_ctx) == -1) { + return RDATA_ERROR_SEEK; + } + return init_stream(ctx); +} + +static rdata_error_t rdata_convert( + char *dst, + size_t dst_len, + const char *src, + size_t src_len, + iconv_t converter +) { + if (dst_len == 0) { + return RDATA_ERROR_CONVERT_LONG_STRING; + } else if (converter) { + size_t dst_left = dst_len - 1; + char *dst_end = dst; + size_t status = iconv(converter, ( + ICONV_CONST char **)&src, + &src_len, + &dst_end, + &dst_left); + if (status == (size_t)-1) { + if (errno == E2BIG) { + return RDATA_ERROR_CONVERT_LONG_STRING; + } else if (errno == 
EILSEQ) { + return RDATA_ERROR_CONVERT_BAD_STRING; + } else if (errno != EINVAL) { + /* EINVAL indicates improper truncation; accept it */ + return RDATA_ERROR_CONVERT; + } + } + dst[dst_len - dst_left - 1] = '\0'; + } else if (src_len + 1 > dst_len) { + return RDATA_ERROR_CONVERT_LONG_STRING; + } else { + memcpy(dst, src, src_len); + dst[src_len] = '\0'; + } + return RDATA_OK; +} + +rdata_ctx_t *rdata_ctx_init(rdata_io_t *io, const char *filename) { + int fd = io->open(filename, io->io_ctx); + if (fd == -1) { + return NULL; + } + rdata_ctx_t *ctx = calloc(1, sizeof(rdata_ctx_t)); + rdata_atom_table_t *atom_table = malloc(sizeof(rdata_atom_table_t)); + + atom_table->count = 0; + atom_table->data = NULL; + + ctx->atom_table = atom_table; + + ctx->machine_needs_byteswap = 0; + if (machine_is_little_endian()) { + ctx->machine_needs_byteswap = 1; + } + + ctx->io = io; + + return ctx; +} + +void free_rdata_ctx(rdata_ctx_t *ctx) { + if (ctx->io) { + ctx->io->close(ctx->io->io_ctx); + } + if (ctx->atom_table) { + if (ctx->atom_table->data) { + int i; + for (i=0; i < ctx->atom_table->count; i++) + free(ctx->atom_table->data[i]); + free(ctx->atom_table->data); + } + free(ctx->atom_table); + } +#if HAVE_BZIP2 + if (ctx->bz_strm) { + BZ2_bzDecompressEnd(ctx->bz_strm); + free(ctx->bz_strm); + } +#endif +#if HAVE_APPLE_COMPRESSION + if (ctx->compression_strm) { + compression_stream_destroy(ctx->compression_strm); + free(ctx->compression_strm); + } +#endif +#if HAVE_ZLIB + if (ctx->z_strm) { + inflateEnd(ctx->z_strm); + free(ctx->z_strm); + } +#endif +#if HAVE_LZMA + if (ctx->lzma_strm) { + lzma_end(ctx->lzma_strm); + free(ctx->lzma_strm); + } +#endif + if (ctx->strm_buffer) { + free(ctx->strm_buffer); + } + if (ctx->converter) { + iconv_close(ctx->converter); + } + free(ctx); +} + +rdata_error_t rdata_parse( + rdata_parser_t *parser, + const char *filename, + void *user_ctx +) { + int is_rdata = 0; + rdata_error_t retval = RDATA_OK; + rdata_v2_header_t v2_header; + rdata_ctx_t 
*ctx = rdata_ctx_init(parser->io, filename); + char *encoding = NULL; + + if (ctx == NULL) { + retval = RDATA_ERROR_OPEN; + goto cleanup; + } + + ctx->user_ctx = user_ctx; + ctx->table_handler = parser->table_handler; + ctx->column_handler = parser->column_handler; + ctx->column_name_handler = parser->column_name_handler; + ctx->row_name_handler = parser->row_name_handler; + ctx->text_value_handler = parser->text_value_handler; + ctx->value_label_handler = parser->value_label_handler; + ctx->dim_handler = parser->dim_handler; + ctx->dim_name_handler = parser->dim_name_handler; + ctx->error_handler = parser->error_handler; + + ctx->is_dimnames = false; + + if ((retval = init_stream(ctx)) != RDATA_OK) { + goto cleanup; + } + + char header_line[5]; + if (read_st( + ctx, &header_line, + sizeof(header_line)) != sizeof(header_line) + ) { + retval = RDATA_ERROR_READ; + goto cleanup; + } + if (memcmp("RDX", header_line, 3) == 0 && header_line[4] == '\n') { + is_rdata = 1; + } else { + reset_stream(ctx); + } + + if (read_st(ctx, &v2_header, sizeof(v2_header)) != sizeof(v2_header)) { + retval = RDATA_ERROR_READ; + goto cleanup; + } + + if (ctx->machine_needs_byteswap) { + v2_header.format_version = byteswap4(v2_header.format_version); + v2_header.writer_version = byteswap4(v2_header.writer_version); + v2_header.reader_version = byteswap4(v2_header.reader_version); + } + + int32_t hdr_result = header_line[3] - '0'; + + if (hdr_result > 0) { + if (is_rdata && v2_header.format_version != (uint32_t)hdr_result) { + retval = RDATA_ERROR_PARSE; + goto cleanup; + } + } else { + if (is_rdata) { + retval = RDATA_ERROR_PARSE; + goto cleanup; + } + } + + if (v2_header.format_version == 3) { + retval = read_character_string(&encoding, ctx); + if (retval != RDATA_OK) + goto cleanup; + + if (strcmp("UTF-8", encoding) != 0) { + if ((ctx->converter = iconv_open("UTF-8", encoding)) + == (iconv_t)-1 + ) { + ctx->converter = NULL; + retval = RDATA_ERROR_UNSUPPORTED_CHARSET; + goto cleanup; + } 
+ } + } + + if (is_rdata) { + retval = read_environment(NULL, ctx); + } else { + retval = read_toplevel_object(NULL, NULL, ctx); + } + if (retval != RDATA_OK) + goto cleanup; + + char test; + + if (read_st(ctx, &test, 1) == 1) { + retval = RDATA_ERROR_PARSE; + goto cleanup; + } + +cleanup: + if (encoding) + free(encoding); + if (ctx) { + free_rdata_ctx(ctx); + } + + return retval; +} + + +static rdata_error_t read_toplevel_object( + const char *table_name, + const char *key, + rdata_ctx_t *ctx +) { + rdata_sexptype_info_t sexptype_info; + rdata_error_t retval = RDATA_OK; + + sexptype_info.attributes = 0; + sexptype_info.tag = 0; + sexptype_info.ref = 0; + if ((retval = read_sexptype_header(&sexptype_info, ctx)) != RDATA_OK) + goto cleanup; + + if (sexptype_info.header.type == RDATA_SEXPTYPE_REAL_VECTOR || + sexptype_info.header.type == RDATA_SEXPTYPE_INTEGER_VECTOR || + sexptype_info.header.type == RDATA_SEXPTYPE_LOGICAL_VECTOR) { + if (table_name == NULL && ctx->table_handler) { + if (ctx->table_handler(key, ctx->user_ctx)) { + retval = RDATA_ERROR_USER_ABORT; + goto cleanup; + } + } + + if ((retval = read_value_vector( + sexptype_info.header, + key, + ctx)) != RDATA_OK + ) + goto cleanup; + } else if (sexptype_info.header.type == RDATA_SEXPTYPE_CHARACTER_VECTOR) { + if (table_name == NULL && ctx->table_handler) { + if (ctx->table_handler(key, ctx->user_ctx)) { + retval = RDATA_ERROR_USER_ABORT; + goto cleanup; + } + } + int32_t length; + + if ((retval = read_length(&length, ctx)) != RDATA_OK) + goto cleanup; + + if (ctx->column_handler) { + if (ctx->column_handler( + key, + RDATA_TYPE_STRING, NULL, + length, ctx->user_ctx) + ) { + retval = RDATA_ERROR_USER_ABORT; + goto cleanup; + } + } + + if ((retval = read_string_vector_n( + sexptype_info.header.attributes, + length, + ctx->text_value_handler, + ctx->user_ctx, ctx)) != RDATA_OK) + goto cleanup; + } else if (sexptype_info.header.type == RDATA_PSEUDO_SXP_ALTREP) { + if (table_name == NULL && ctx->table_handler) 
{ + if (ctx->table_handler(key, ctx->user_ctx)) { + retval = RDATA_ERROR_USER_ABORT; + goto cleanup; + } + } + if ((retval = read_altrep_vector(key, ctx)) != RDATA_OK) + goto cleanup; + } else if (sexptype_info.header.type == RDATA_SEXPTYPE_GENERIC_VECTOR && + sexptype_info.header.object && sexptype_info.header.attributes) { + if (table_name != NULL) { + retval = recursive_discard(sexptype_info.header, ctx); + } else { + if (ctx->table_handler) { + if (ctx->table_handler(key, ctx->user_ctx)) { + retval = RDATA_ERROR_USER_ABORT; + goto cleanup; + } + } + retval = read_generic_list(sexptype_info.header.attributes, ctx); + } + if (retval != RDATA_OK) + goto cleanup; + } else { + if ((retval = recursive_discard(sexptype_info.header, ctx)) + != RDATA_OK + ) + goto cleanup; + } + +cleanup: + + return retval; +} + +static rdata_error_t read_environment( + const char *table_name, + rdata_ctx_t *ctx +) { + rdata_error_t retval = RDATA_OK; + char *key = NULL; + + while (1) { + rdata_sexptype_info_t sexptype_info; + + if ((retval = read_sexptype_header(&sexptype_info, ctx)) != RDATA_OK) + goto cleanup; + + if (sexptype_info.header.type == RDATA_PSEUDO_SXP_NIL) + break; + + if (sexptype_info.header.type != RDATA_SEXPTYPE_PAIRLIST) { + if ((retval = recursive_discard( + sexptype_info.header, + ctx)) != RDATA_OK) + goto cleanup; + continue; + } + + if ((key = atom_table_lookup( + ctx->atom_table, + sexptype_info.ref)) == NULL) { + retval = RDATA_ERROR_PARSE; + goto cleanup; + } + + if ((retval = read_toplevel_object(table_name, key, ctx)) != RDATA_OK) + goto cleanup; + } + +cleanup: + + return retval; +} + +static rdata_error_t read_sexptype_header( + rdata_sexptype_info_t *header_info, + rdata_ctx_t *ctx +) { + uint32_t sexptype; + rdata_sexptype_header_t header; + rdata_error_t retval = RDATA_OK; + if (read_st(ctx, &sexptype, sizeof(sexptype)) != sizeof(sexptype)) { + retval = RDATA_ERROR_READ; + goto cleanup; + } + if (ctx->machine_needs_byteswap) + sexptype = 
byteswap4(sexptype); + + memcpy(&header, &sexptype, sizeof(sexptype)); + uint32_t attributes = 0, tag = 0, ref = 0; + + if (header.type == RDATA_SEXPTYPE_PAIRLIST_ATTR) { + header.attributes = 1; + header.type = RDATA_SEXPTYPE_PAIRLIST; + } + if (header.type == RDATA_SEXPTYPE_LANGUAGE_OBJECT_ATTR) { + header.attributes = 1; + header.type = RDATA_SEXPTYPE_LANGUAGE_OBJECT; + } + if (header.type == RDATA_SEXPTYPE_PAIRLIST) { + if (header.attributes) { + if (read_st( + ctx, + &attributes, + sizeof(attributes)) != sizeof(attributes) + ) { + retval = RDATA_ERROR_READ; + goto cleanup; + } + if (ctx->machine_needs_byteswap) { + header_info->attributes = byteswap4(header_info->attributes); + } + } + if (header.tag) { + if (read_st(ctx, &tag, sizeof(tag)) != sizeof(tag)) { + retval = RDATA_ERROR_READ; + goto cleanup; + } + if (ctx->machine_needs_byteswap) + tag = byteswap4(tag); + } + + if (tag == 1) { + rdata_sexptype_info_t key_info; + + if ((retval = read_sexptype_header(&key_info, ctx)) != RDATA_OK) + goto cleanup; + + if (key_info.header.type != RDATA_SEXPTYPE_CHARACTER_STRING) { + retval = RDATA_ERROR_PARSE; + goto cleanup; + } + + char *key = NULL; + if ((retval = read_character_string(&key, ctx)) != RDATA_OK) + goto cleanup; + + ref = atom_table_add(ctx->atom_table, key); + + free(key); + } else if ((tag & 0xFF) == RDATA_PSEUDO_SXP_REF) { + ref = (tag >> 8); + } + } + if (header.type == RDATA_PSEUDO_SXP_REF) { + ref = (sexptype >> 8); + } + + header_info->header = header; + header_info->attributes = attributes; + header_info->tag = tag; + header_info->ref = ref; + +cleanup: + + return retval; +} + +static int handle_class_name(const char *buf, int i, void *ctx) { + unsigned int *column_class = (unsigned int *)ctx; + if (buf) { + if (strcmp(buf, "POSIXct") == 0) { + *column_class |= RDATA_CLASS_POSIXCT; + } + if (strcmp(buf, "Date") == 0) { + *column_class |= RDATA_CLASS_DATE; + } + } + return RDATA_OK; +} + +static int handle_vector_attribute( + char *key, + 
rdata_sexptype_info_t val_info, + rdata_ctx_t *ctx +) { + rdata_error_t retval = RDATA_OK; + if (strcmp(key, "levels") == 0) { + retval = read_string_vector( + val_info.header.attributes, + ctx->value_label_handler, + ctx->user_ctx, ctx); + } else if (strcmp(key, "class") == 0) { + ctx->column_class = 0; + retval = read_string_vector( + val_info.header.attributes, + &handle_class_name, + &ctx->column_class, ctx); + } else if (strcmp(key, "dim") == 0) { + if (val_info.header.type == RDATA_SEXPTYPE_INTEGER_VECTOR) { + int32_t length; + if ((retval = read_length(&length, ctx)) != RDATA_OK) + goto cleanup; + + if ((uint32_t)length <= sizeof(ctx->dims)/sizeof(ctx->dims[0])) { + int buf_len = length * sizeof(int32_t); + if (read_st(ctx, ctx->dims, buf_len) != buf_len) { + retval = RDATA_ERROR_READ; + goto cleanup; + } + if (ctx->machine_needs_byteswap) { + int i; + for (i=0; i < length; i++) { + ctx->dims[i] = byteswap4(ctx->dims[i]); + } + } + if (ctx->dim_handler) { + if (ctx->dim_handler( + key, + RDATA_TYPE_INT32, + ctx->dims, length, + ctx->user_ctx) + ) { + retval = RDATA_ERROR_USER_ABORT; + } + } + } + } + } else if (strcmp(key, "dimnames") == 0) { + ctx->is_dimnames = true; + retval = read_generic_list(val_info.header.attributes, ctx); + } else { + retval = recursive_discard(val_info.header, ctx); + } +cleanup: + return retval; +} + +static rdata_error_t read_character_string(char **key, rdata_ctx_t *ctx) { + uint32_t length; + char *string = NULL; + char *utf8_string = NULL; + rdata_error_t retval = RDATA_OK; + + if (read_st(ctx, &length, sizeof(length)) != sizeof(length)) { + retval = RDATA_ERROR_READ; + goto cleanup; + } + + if (ctx->machine_needs_byteswap) + length = byteswap4(length); + + if ((int32_t)length == -1 || length == 0) { + *key = strdup(""); + return RDATA_OK; + } + + if (length < 0) { + return RDATA_ERROR_PARSE; + } + + if ((string = rdata_malloc(length)) == NULL) { + retval = RDATA_ERROR_MALLOC; + goto cleanup; + } + + if (read_st(ctx, string, 
length) != length) { + retval = RDATA_ERROR_READ; + goto cleanup; + } + + if ((utf8_string = rdata_malloc(4*length+1)) == NULL) { + retval = RDATA_ERROR_MALLOC; + goto cleanup; + } + + retval = rdata_convert( + utf8_string, + 4 * length + 1, + string, length, + ctx->converter); + if (retval != RDATA_OK) + goto cleanup; + +cleanup: + if (string) + free(string); + + if (retval == RDATA_OK) { + *key = utf8_string; + } else if (utf8_string) { + free(utf8_string); + } + + return retval; +} + +static int handle_data_frame_attribute( + char *key, + rdata_sexptype_info_t val_info, + rdata_ctx_t *ctx +) { + rdata_error_t retval = RDATA_OK; + + if (strcmp(key, "names") == 0 && + val_info.header.type == RDATA_SEXPTYPE_CHARACTER_VECTOR + ) { + retval = read_string_vector( + val_info.header.attributes, + ctx->column_name_handler, ctx->user_ctx, ctx); + } else if (strcmp(key, "row.names") == 0 && + val_info.header.type == RDATA_SEXPTYPE_CHARACTER_VECTOR + ) { + retval = read_string_vector( + val_info.header.attributes, + ctx->row_name_handler, + ctx->user_ctx, ctx); + } else if (strcmp(key, "label.table") == 0) { + retval = recursive_discard(val_info.header, ctx); + } else { + retval = recursive_discard(val_info.header, ctx); + } + + return retval; +} + +static rdata_error_t read_attributes(int (*handle_attribute)( + char *key, + rdata_sexptype_info_t val_info, + rdata_ctx_t *ctx), + rdata_ctx_t *ctx +) { + rdata_sexptype_info_t pairlist_info, val_info; + rdata_error_t retval = RDATA_OK; + char *key = NULL; + + retval = read_sexptype_header(&pairlist_info, ctx); + if (retval != RDATA_OK) + goto cleanup; + + while (pairlist_info.header.type == RDATA_SEXPTYPE_PAIRLIST) { + /* value */ + if ((retval = read_sexptype_header(&val_info, ctx)) != RDATA_OK) + goto cleanup; + + if (handle_attribute) { + if ((key = atom_table_lookup( + ctx->atom_table, pairlist_info.ref)) == NULL) { + retval = RDATA_ERROR_PARSE; + goto cleanup; + } + if ((retval = handle_attribute(key, val_info, ctx)) != 
RDATA_OK) + goto cleanup; + } else { + if ((retval = recursive_discard( + val_info.header, + ctx)) != RDATA_OK + ) + goto cleanup; + } + + /* next */ + if ((retval = read_sexptype_header(&pairlist_info, ctx)) != RDATA_OK) + goto cleanup; + } + +cleanup: + return retval; +} + +static rdata_error_t read_wrap_real(const char *name, rdata_ctx_t *ctx) { + rdata_error_t retval = RDATA_OK; + rdata_sexptype_info_t sexptype_info; + /* pairlist */ + if ((retval = read_sexptype_header(&sexptype_info, ctx)) != RDATA_OK) + goto cleanup; + if (sexptype_info.header.type != RDATA_SEXPTYPE_PAIRLIST) { + retval = RDATA_ERROR_PARSE; + goto cleanup; + } + /* representation */ + if ((retval = read_sexptype_header(&sexptype_info, ctx)) != RDATA_OK) + goto cleanup; + + if ((retval = read_value_vector( + sexptype_info.header, + name, + ctx)) != RDATA_OK + ) + goto cleanup; + + /* alt representation */ + if ((retval = read_sexptype_header(&sexptype_info, ctx)) != RDATA_OK) + goto cleanup; + if ((retval = recursive_discard(sexptype_info.header, ctx)) != RDATA_OK) + goto cleanup; + + /* nil */ + if ((retval = read_sexptype_header(&sexptype_info, ctx)) != RDATA_OK) + goto cleanup; + if (sexptype_info.header.type != RDATA_PSEUDO_SXP_NIL) { + retval = RDATA_ERROR_PARSE; + goto cleanup; + } + +cleanup: + return retval; +} + +static rdata_error_t read_compact_intseq( + const char *name, + rdata_ctx_t *ctx +) { + rdata_error_t retval = RDATA_OK; + rdata_sexptype_info_t sexptype_info; + if ((retval = read_sexptype_header(&sexptype_info, ctx)) != RDATA_OK) + goto cleanup; + + int32_t length; + if ((retval = read_length(&length, ctx)) != RDATA_OK) + goto cleanup; + if (length != 3) { + retval = RDATA_ERROR_PARSE; + goto cleanup; + } + + double vals[3]; + if (read_st(ctx, vals, sizeof(vals)) != sizeof(vals)) { + retval = RDATA_ERROR_READ; + goto cleanup; + } + if (ctx->machine_needs_byteswap) { + vals[0] = byteswap_double(vals[0]); + vals[1] = byteswap_double(vals[1]); + vals[2] = 
byteswap_double(vals[2]); + } + + if (sexptype_info.header.attributes) { + if ((retval = read_attributes( + &handle_vector_attribute, ctx)) != RDATA_OK + ) + goto cleanup; + } + + if (ctx->column_handler) { + int32_t *integers = rdata_malloc(vals[0] * sizeof(int32_t)); + int32_t val = vals[1]; + for (int i=0; i < vals[0]; i++) { + integers[i] = val; + val += vals[2]; + } + int cb_retval = ctx->column_handler( + name, + RDATA_TYPE_INT32, + integers, + vals[0], ctx->user_ctx); + free(integers); + if (cb_retval) { + retval = RDATA_ERROR_USER_ABORT; + goto cleanup; + } + } + + /* nil */ + if ((retval = read_sexptype_header(&sexptype_info, ctx)) != RDATA_OK) + goto cleanup; + if (sexptype_info.header.type != RDATA_PSEUDO_SXP_NIL) { + retval = RDATA_ERROR_PARSE; + goto cleanup; + } +cleanup: + return retval; +} + +static int deferred_string_handler( + const char *name, + enum rdata_type_e type, + void *vals, + long length, + void *user_ctx +) { + rdata_ctx_t *ctx = (rdata_ctx_t *)user_ctx; + if (ctx->column_handler) + ctx->column_handler( + name, + RDATA_TYPE_STRING, + NULL, + length, + ctx->user_ctx); + if (ctx->text_value_handler) { + for (int i=0; i < length; i++) { + char buf[128] = { 0 }; + if (type == RDATA_TYPE_REAL) { + snprintf(buf, sizeof(buf), "%.0lf", ((double *)vals)[i]); + } else if (type == RDATA_TYPE_INT32) { + snprintf(buf, sizeof(buf), "%d", ((int32_t *)vals)[i]); + } + ctx->text_value_handler(buf, i, ctx->user_ctx); + } + } + return 0; +} + +static rdata_error_t read_deferred_string( + const char *name, + rdata_ctx_t *ctx +) { + rdata_error_t retval = RDATA_OK; + rdata_sexptype_info_t sexptype_info; + /* pairlist */ + if ((retval = read_sexptype_header(&sexptype_info, ctx)) != RDATA_OK) + goto cleanup; + if (sexptype_info.header.type != RDATA_SEXPTYPE_PAIRLIST) { + retval = RDATA_ERROR_PARSE; + goto cleanup; + } + /* representation */ + if ((retval = read_sexptype_header(&sexptype_info, ctx)) != RDATA_OK) + goto cleanup; + + if ((retval = 
read_value_vector_cb( + sexptype_info.header, + name, + &deferred_string_handler, + ctx, + ctx)) != RDATA_OK + ) + goto cleanup; + + /* alt representation */ + if ((retval = read_sexptype_header(&sexptype_info, ctx)) != RDATA_OK) + goto cleanup; + if ((retval = recursive_discard(sexptype_info.header, ctx)) != RDATA_OK) + goto cleanup; + + /* nil */ + if ((retval = read_sexptype_header(&sexptype_info, ctx)) != RDATA_OK) + goto cleanup; + if (sexptype_info.header.type != RDATA_PSEUDO_SXP_NIL) { + retval = RDATA_ERROR_PARSE; + goto cleanup; + } + +cleanup: + return retval; +} + +static rdata_error_t read_altrep_vector( + const char *name, + rdata_ctx_t *ctx +) { + rdata_error_t retval = RDATA_OK; + rdata_sexptype_info_t sexptype_info; + /* pairlist */ + if ((retval = read_sexptype_header(&sexptype_info, ctx)) != RDATA_OK) + goto cleanup; + if (sexptype_info.header.type != RDATA_SEXPTYPE_PAIRLIST) { + retval = RDATA_ERROR_PARSE; + goto cleanup; + } + /* class name */ + char *class = NULL; + if ((retval = read_sexptype_header(&sexptype_info, ctx)) != RDATA_OK) + goto cleanup; + if (sexptype_info.header.type == RDATA_SEXPTYPE_SYMBOL) { + if ((retval = read_sexptype_header(&sexptype_info, ctx)) != RDATA_OK) + goto cleanup; + if (sexptype_info.header.type != RDATA_SEXPTYPE_CHARACTER_STRING) { + retval = RDATA_ERROR_PARSE; + goto cleanup; + } + if ((retval = read_character_string(&class, ctx)) != RDATA_OK) + goto cleanup; + + atom_table_add(ctx->atom_table, class); + } else if (sexptype_info.header.type == RDATA_PSEUDO_SXP_REF) { + if ((class = atom_table_lookup( + ctx->atom_table, + sexptype_info.ref)) == NULL + ) { + retval = RDATA_ERROR_PARSE; + goto cleanup; + } + } else { + retval = RDATA_ERROR_PARSE; + goto cleanup; + } + + /* package and class ID */ + if ((retval = read_sexptype_header(&sexptype_info, ctx)) != RDATA_OK) + goto cleanup; + if (sexptype_info.header.type != RDATA_SEXPTYPE_PAIRLIST) { + retval = RDATA_ERROR_PARSE; + goto cleanup; + } + if ((retval = 
recursive_discard(sexptype_info.header, ctx)) != RDATA_OK) + goto cleanup; + + if (strcmp(class, "wrap_real") == 0) { + if ((retval = read_wrap_real(name, ctx)) != RDATA_OK) + goto cleanup; + } else if (strcmp(class, "compact_intseq") == 0) { + if ((retval = read_compact_intseq(name, ctx)) != RDATA_OK) + goto cleanup; + } else if (strcmp(class, "deferred_string") == 0) { + if ((retval = read_deferred_string(name, ctx)) != RDATA_OK) + goto cleanup; + } else { + if (ctx->error_handler) { + char error_buf[1024]; + snprintf( + error_buf, + sizeof(error_buf), + "Unrecognized ALTREP class: %s\n", + class); + ctx->error_handler(error_buf, ctx->user_ctx); + } + retval = RDATA_ERROR_UNSUPPORTED_STORAGE_CLASS; + } +cleanup: + return retval; +} + +static rdata_error_t read_generic_list(int attributes, rdata_ctx_t *ctx) { + rdata_error_t retval = RDATA_OK; + int32_t length; + unsigned int i; + rdata_sexptype_info_t sexptype_info; + + if ((retval = read_length(&length, ctx)) != RDATA_OK) + goto cleanup; + + for (i=0; i < (uint32_t)length; i++) { + if ((retval = read_sexptype_header( + &sexptype_info, ctx)) != RDATA_OK + ) + goto cleanup; + + if (sexptype_info.header.type == RDATA_SEXPTYPE_CHARACTER_VECTOR) { + int32_t vec_length; + + if ((retval = read_length(&vec_length, ctx)) != RDATA_OK) + goto cleanup; + if (ctx->is_dimnames) { + retval = read_string_vector_n( + sexptype_info.header.attributes, + vec_length, + ctx->dim_name_handler, + ctx->user_ctx, ctx); + } else { + if (ctx->column_handler) { + if (ctx->column_handler( + NULL, + RDATA_TYPE_STRING, + NULL, + vec_length, + ctx->user_ctx) + ) { + retval = RDATA_ERROR_USER_ABORT; + goto cleanup; + } + } + retval = read_string_vector_n( + sexptype_info.header.attributes, + vec_length, + ctx->text_value_handler, + ctx->user_ctx, ctx); + } + } else if (sexptype_info.header.type == RDATA_PSEUDO_SXP_ALTREP) { + retval = read_altrep_vector(NULL, ctx); + } else if (sexptype_info.header.type == RDATA_PSEUDO_SXP_NIL) { + if 
(ctx->is_dimnames && + ctx->dim_name_handler && + i < sizeof(ctx->dims)/sizeof(ctx->dims[0]) + ) { + int j; + for (j=0; j < ctx->dims[i]; j++) { + ctx->dim_name_handler(NULL, j, ctx->user_ctx); + } + } + } else { + retval = read_value_vector(sexptype_info.header, NULL, ctx); + } + if (retval != RDATA_OK) + goto cleanup; + } + + if (attributes) { + if ((retval = read_attributes( + &handle_data_frame_attribute, + ctx)) != RDATA_OK + ) + goto cleanup; + } + +cleanup: + + if (ctx->is_dimnames) + ctx->is_dimnames = false; + + return retval; +} + +static rdata_error_t read_length(int32_t *outLength, rdata_ctx_t *ctx) { + int32_t length; + rdata_error_t retval = RDATA_OK; + + if (read_st(ctx, &length, sizeof(length)) != sizeof(length)) { + retval = RDATA_ERROR_READ; + goto cleanup; + } + + if (ctx->machine_needs_byteswap) + length = byteswap4(length); + + if (outLength) + *outLength = length; + +cleanup: + + return retval; +} + +static rdata_error_t read_string_vector_n( + int attributes, + int32_t length, + rdata_text_value_handler text_value_handler, + void *callback_ctx, + rdata_ctx_t *ctx +) { + int32_t string_length; + rdata_error_t retval = RDATA_OK; + rdata_sexptype_info_t info; + size_t buffer_size = 4096; + char *buffer = NULL; + size_t utf8_buffer_size = 16384; + char *utf8_buffer = NULL; + int i; + + buffer = rdata_malloc(buffer_size); + if (ctx->converter) + utf8_buffer = rdata_malloc(utf8_buffer_size); + + for (i=0; i < length; i++) { + if ((retval = read_sexptype_header(&info, ctx)) != RDATA_OK) + goto cleanup; + + if (info.header.type != RDATA_SEXPTYPE_CHARACTER_STRING) { + retval = RDATA_ERROR_PARSE; + goto cleanup; + } + + if ((retval = read_length(&string_length, ctx)) != RDATA_OK) + goto cleanup; + + int32_t str_len_calc = string_length + 1; + if (str_len_calc > 0) { + if ((uint32_t)str_len_calc > buffer_size) { + buffer_size = str_len_calc; + if ((buffer = rdata_realloc(buffer, buffer_size)) == NULL) { + retval = RDATA_ERROR_MALLOC; + goto cleanup; + } 
+ } + } + + if (string_length >= 0) { + if (read_st(ctx, buffer, string_length) != string_length) { + retval = RDATA_ERROR_READ; + goto cleanup; + } + buffer[string_length] = '\0'; + } + + if (text_value_handler) { + int cb_retval = 0; + if (string_length < 0) { + cb_retval = text_value_handler(NULL, i, callback_ctx); + } else if (!ctx->converter) { + cb_retval = text_value_handler(buffer, i, callback_ctx); + } else { + int32_t str_len_calc = 4*string_length + 1; + if (str_len_calc >= 0) { + if ((uint32_t)str_len_calc > utf8_buffer_size) { + utf8_buffer_size = str_len_calc; + if ((utf8_buffer = rdata_realloc( + utf8_buffer, utf8_buffer_size)) == NULL + ) { + retval = RDATA_ERROR_MALLOC; + goto cleanup; + } + } + } + + retval = rdata_convert( + utf8_buffer, + utf8_buffer_size, + buffer, string_length, + ctx->converter); + if (retval != RDATA_OK) + goto cleanup; + + cb_retval = text_value_handler(utf8_buffer, i, callback_ctx); + } + if (cb_retval) { + retval = RDATA_ERROR_USER_ABORT; + goto cleanup; + } + } + } + + if (attributes) { + if ((retval = read_attributes( + &handle_vector_attribute, + ctx)) != RDATA_OK) + goto cleanup; + } + +cleanup: + + if (buffer) + free(buffer); + if (utf8_buffer) + free(utf8_buffer); + + return retval; +} + +static rdata_error_t read_string_vector( + int attributes, + rdata_text_value_handler text_value_handler, + void *callback_ctx, + rdata_ctx_t *ctx +) { + rdata_error_t retval = RDATA_OK; + int32_t length; + + if ((retval = read_length(&length, ctx)) != RDATA_OK) + return retval; + + return read_string_vector_n( + attributes, + length, + text_value_handler, + callback_ctx, + ctx); +} + +static rdata_error_t read_value_vector_cb( + rdata_sexptype_header_t header, + const char *name, + rdata_column_handler column_handler, + void *user_ctx, + rdata_ctx_t *ctx +) { + rdata_error_t retval = RDATA_OK; + int32_t length; + size_t input_elem_size = 0; + void *vals = NULL; + size_t buf_len = 0; + enum rdata_type_e output_data_type; + unsigned 
int i; + + switch (header.type) { + case RDATA_SEXPTYPE_REAL_VECTOR: + input_elem_size = sizeof(double); + output_data_type = RDATA_TYPE_REAL; + break; + case RDATA_SEXPTYPE_INTEGER_VECTOR: + input_elem_size = sizeof(int32_t); + output_data_type = RDATA_TYPE_INT32; + break; + case RDATA_SEXPTYPE_LOGICAL_VECTOR: + input_elem_size = sizeof(int32_t); + output_data_type = RDATA_TYPE_LOGICAL; + break; + default: + retval = RDATA_ERROR_PARSE; + break; + } + if (retval != RDATA_OK) + goto cleanup; + + if ((retval = read_length(&length, ctx)) != RDATA_OK) + goto cleanup; + + buf_len = length * input_elem_size; + + if (buf_len) { + vals = rdata_malloc(buf_len); + if (vals == NULL) { + retval = RDATA_ERROR_MALLOC; + goto cleanup; + } + + ssize_t result_st = read_st(ctx, vals, buf_len); + + if (result_st > 0) { + if ((size_t)result_st != buf_len) { + retval = RDATA_ERROR_READ; + goto cleanup; + } + } else { + retval = RDATA_ERROR_READ; + goto cleanup; + } + + if (ctx->machine_needs_byteswap) { + if (input_elem_size == sizeof(double)) { + double *d_vals = (double *)vals; + for (i=0; i < buf_len/sizeof(double); i++) { + d_vals[i] = byteswap_double(d_vals[i]); + } + } else { + uint32_t *i_vals = (uint32_t *)vals; + for (i=0; i < buf_len/sizeof(uint32_t); i++) { + i_vals[i] = byteswap4(i_vals[i]); + } + } + } + } + + ctx->column_class = 0; + if (header.attributes) { + if ((retval = read_attributes( + &handle_vector_attribute, + ctx)) != RDATA_OK) + goto cleanup; + } + if (ctx->column_class == RDATA_CLASS_POSIXCT) + output_data_type = RDATA_TYPE_TIMESTAMP; + if (ctx->column_class == RDATA_CLASS_DATE) + output_data_type = RDATA_TYPE_DATE; + + if (column_handler) { + if (column_handler(name, output_data_type, vals, length, user_ctx)) { + retval = RDATA_ERROR_USER_ABORT; + goto cleanup; + } + } + +cleanup: + if (vals) + free(vals); + + return retval; +} + +static rdata_error_t read_value_vector( + rdata_sexptype_header_t header, + const char *name, + rdata_ctx_t *ctx +) { + return 
read_value_vector_cb( + header, + name, + ctx->column_handler, + ctx->user_ctx, ctx); +} + +static rdata_error_t discard_vector( + rdata_sexptype_header_t sexptype_header, + size_t element_size, + rdata_ctx_t *ctx +) { + int32_t length; + rdata_error_t retval = RDATA_OK; + + if ((retval = read_length(&length, ctx)) != RDATA_OK) + goto cleanup; + + if (length > 0) { + if (lseek_st(ctx, length * element_size) == -1) { + return RDATA_ERROR_SEEK; + } + } else if (ctx->error_handler) { + char error_buf[1024]; + snprintf( + error_buf, + sizeof(error_buf), + "Vector with non-positive length: %d\n", + length); + ctx->error_handler(error_buf, ctx->user_ctx); + } + + if (sexptype_header.attributes) { + rdata_sexptype_info_t temp_info; + if ((retval = read_sexptype_header(&temp_info, ctx)) != RDATA_OK) + goto cleanup; + + retval = recursive_discard(temp_info.header, ctx); + } + +cleanup: + + return retval; +} + +static rdata_error_t discard_character_string( + int add_to_table, + rdata_ctx_t *ctx +) { + rdata_error_t retval = RDATA_OK; + char *key = NULL; + + if ((retval = read_character_string(&key, ctx)) != RDATA_OK) + goto cleanup; + + if (strlen(key) > 0 && add_to_table) { + atom_table_add(ctx->atom_table, key); + } + + free(key); + +cleanup: + + return retval; +} + +static rdata_error_t discard_pairlist( + rdata_sexptype_header_t sexptype_header, + rdata_ctx_t *ctx +) { + rdata_sexptype_info_t temp_info; + rdata_error_t error = 0; + while (1) { + switch (sexptype_header.type) { + case RDATA_SEXPTYPE_PAIRLIST: + /* value */ + if ((error = read_sexptype_header( + &temp_info, + ctx)) != RDATA_OK) + return error; + if ((error = recursive_discard( + temp_info.header, + ctx)) != RDATA_OK) + return error; + + /* tail */ + if ((error = read_sexptype_header( + &temp_info, + ctx)) != RDATA_OK) + return error; + sexptype_header = temp_info.header; + break; + case RDATA_PSEUDO_SXP_NIL: + goto done; + default: + return RDATA_ERROR_PARSE; + } + } +done: + + return 0; +} + +static 
rdata_error_t recursive_discard( + rdata_sexptype_header_t sexptype_header, + rdata_ctx_t *ctx +) { + uint32_t length; + rdata_sexptype_info_t info; + rdata_sexptype_info_t prot, tag; + + rdata_error_t error = 0; + unsigned int i; + + switch (sexptype_header.type) { + case RDATA_SEXPTYPE_SYMBOL: + if ((error = read_sexptype_header(&info, ctx)) != RDATA_OK) + goto cleanup; + + if ((error = recursive_discard(info.header, ctx)) != RDATA_OK) + goto cleanup; + break; + case RDATA_PSEUDO_SXP_PERSIST: + case RDATA_PSEUDO_SXP_NAMESPACE: + case RDATA_PSEUDO_SXP_PACKAGE: + if ((error = read_sexptype_header(&info, ctx)) != RDATA_OK) + goto cleanup; + + if ((error = recursive_discard(info.header, ctx)) != RDATA_OK) + goto cleanup; + break; + case RDATA_SEXPTYPE_BUILTIN_FUNCTION: + case RDATA_SEXPTYPE_SPECIAL_FUNCTION: + error = discard_character_string(0, ctx); + break; + case RDATA_SEXPTYPE_PAIRLIST: + error = discard_pairlist(sexptype_header, ctx); + break; + case RDATA_SEXPTYPE_CHARACTER_STRING: + error = discard_character_string(1, ctx); + break; + case RDATA_SEXPTYPE_RAW_VECTOR: + error = discard_vector(sexptype_header, 1, ctx); + break; + case RDATA_SEXPTYPE_LOGICAL_VECTOR: + error = discard_vector(sexptype_header, 4, ctx); + break; + case RDATA_SEXPTYPE_INTEGER_VECTOR: + error = discard_vector(sexptype_header, 4, ctx); + break; + case RDATA_SEXPTYPE_REAL_VECTOR: + error = discard_vector(sexptype_header, 8, ctx); + break; + case RDATA_SEXPTYPE_COMPLEX_VECTOR: + error = discard_vector(sexptype_header, 16, ctx); + break; + case RDATA_SEXPTYPE_CHARACTER_VECTOR: + case RDATA_SEXPTYPE_GENERIC_VECTOR: + case RDATA_SEXPTYPE_EXPRESSION_VECTOR: + if (read_st(ctx, &length, sizeof(length)) != sizeof(length)) { + return RDATA_ERROR_READ; + } + if (ctx->machine_needs_byteswap) + length = byteswap4(length); + + for (i=0; i < length; i++) { + if ((error = read_sexptype_header(&info, ctx)) != RDATA_OK) + goto cleanup; + + if (sexptype_header.type == RDATA_SEXPTYPE_CHARACTER_VECTOR) { + 
if (info.header.type != RDATA_SEXPTYPE_CHARACTER_STRING) { + error = RDATA_ERROR_PARSE; + goto cleanup; + } + + if ((error = discard_character_string(0, ctx)) != RDATA_OK) + goto cleanup; + } else if ((error = recursive_discard( + info.header, + ctx)) != RDATA_OK) { + goto cleanup; + } + } + if (sexptype_header.attributes) { + if ((error = read_attributes(NULL, ctx)) != RDATA_OK) + goto cleanup; + } + break; + case RDATA_SEXPTYPE_DOT_DOT_DOT: + case RDATA_SEXPTYPE_PROMISE: + case RDATA_SEXPTYPE_LANGUAGE_OBJECT: + case RDATA_SEXPTYPE_CLOSURE: + if (sexptype_header.attributes) { + if ((error = read_sexptype_header(&info, ctx)) != RDATA_OK) + goto cleanup; + + if ((error = recursive_discard(info.header, ctx)) != RDATA_OK) + goto cleanup; + } + if (sexptype_header.tag) { + if ((error = read_sexptype_header(&info, ctx)) != RDATA_OK) + goto cleanup; + + if ((error = recursive_discard(info.header, ctx)) != RDATA_OK) + goto cleanup; + } + /* CAR */ + if ((error = read_sexptype_header(&info, ctx)) != RDATA_OK) + goto cleanup; + + if ((error = recursive_discard(info.header, ctx)) != RDATA_OK) + goto cleanup; + + /* CDR */ + if ((error = read_sexptype_header(&info, ctx)) != RDATA_OK) + goto cleanup; + + if ((error = recursive_discard(info.header, ctx)) != RDATA_OK) + goto cleanup; + break; + case RDATA_SEXPTYPE_EXTERNAL_POINTER: + read_sexptype_header(&prot, ctx); + recursive_discard(prot.header, ctx); + + read_sexptype_header(&tag, ctx); + recursive_discard(tag.header, ctx); + break; + case RDATA_SEXPTYPE_ENVIRONMENT: + /* locked */ + if (lseek_st(ctx, sizeof(uint32_t)) == -1) { + return RDATA_ERROR_SEEK; + } + + rdata_sexptype_info_t enclosure, frame, hash_table, attributes; + read_sexptype_header(&enclosure, ctx); + recursive_discard(enclosure.header, ctx); + + read_sexptype_header(&frame, ctx); + recursive_discard(frame.header, ctx); + + read_sexptype_header(&hash_table, ctx); + recursive_discard(hash_table.header, ctx); + + read_sexptype_header(&attributes, ctx); + 
recursive_discard(attributes.header, ctx); + /* + if (sexptype_header.attributes) { + if (lseek(ctx->fd, sizeof(uint32_t), SEEK_CUR) == -1) { + return RDATA_ERROR_SEEK; + } + } */ + break; + case RDATA_PSEUDO_SXP_REF: + case RDATA_PSEUDO_SXP_NIL: + case RDATA_PSEUDO_SXP_GLOBAL_ENVIRONMENT: + case RDATA_PSEUDO_SXP_UNBOUND_VALUE: + case RDATA_PSEUDO_SXP_MISSING_ARGUMENT: + case RDATA_PSEUDO_SXP_BASE_NAMESPACE: + case RDATA_PSEUDO_SXP_EMPTY_ENVIRONMENT: + case RDATA_PSEUDO_SXP_BASE_ENVIRONMENT: + break; + case RDATA_PSEUDO_SXP_ALTREP: + /* class, package, type */ + if ((error = read_sexptype_header(&info, ctx)) != RDATA_OK) + goto cleanup; + if ((error = recursive_discard(info.header, ctx)) != RDATA_OK) + goto cleanup; + + while (1) { + if ((error = read_sexptype_header(&info, ctx)) != RDATA_OK) + goto cleanup; + if (info.header.type == RDATA_SEXPTYPE_PAIRLIST) + continue; + if (info.header.type == RDATA_PSEUDO_SXP_NIL) + break; + if ((error = recursive_discard(info.header, ctx)) != RDATA_OK) + goto cleanup; + } + break; + default: + if (ctx->error_handler) { + char error_buf[1024]; + snprintf( + error_buf, + sizeof(error_buf), + "Unhandled S-Expression: %d", + sexptype_header.type); + ctx->error_handler(error_buf, ctx->user_ctx); + } + return RDATA_ERROR_UNSUPPORTED_S_EXPRESSION; + } +cleanup: + + return error; +} diff --git a/pandas/_libs/src/librdata/rdata_write.c b/pandas/_libs/src/librdata/rdata_write.c new file mode 100644 index 0000000000000..0383dd85f4ace --- /dev/null +++ b/pandas/_libs/src/librdata/rdata_write.c @@ -0,0 +1,704 @@ +/* +Copyright (c) 2020 Evan Miller +*/ + +#include <stdlib.h> +#include <string.h> +#include <time.h> + +#include "CKHashTable.h" +#include "rdata.h" +#include "rdata_internal.h" + +#define R_TAG 0x01 +#define R_OBJECT 0x02 +#define R_ATTRIBUTES 0x04 + +#define INITIAL_COLUMNS_CAPACITY 100 + +#ifdef _WIN32 +#define timegm _mkgmtime +#endif + +rdata_writer_t *rdata_writer_init( + rdata_data_writer write_callback, + 
rdata_file_format_t format +) { + rdata_writer_t *writer = calloc(1, sizeof(rdata_writer_t)); + writer->file_format = format; + writer->bswap = machine_is_little_endian(); + writer->atom_table = ck_hash_table_init(100, 24); + writer->data_writer = write_callback; + + writer->columns_capacity = INITIAL_COLUMNS_CAPACITY; + writer->columns = malloc( + writer->columns_capacity * sizeof(rdata_column_t *)); + + return writer; +} + +void rdata_writer_free(rdata_writer_t *writer) { + ck_hash_table_free(writer->atom_table); + int i, j; + for (i=0; i < writer->columns_count; i++) { + rdata_column_t *column = writer->columns[i]; + for (j=0; j < column->factor_count; j++) { + free(column->factor[j]); + } + free(column->factor); + free(column); + } + free(writer->columns); + free(writer); +} + +rdata_column_t *rdata_add_column( + rdata_writer_t *writer, + const char *name, + rdata_type_t type +) { + if (writer->columns_count == writer->columns_capacity) { + writer->columns_capacity *= 2; + writer->columns = realloc(writer->columns, + writer->columns_capacity * sizeof(rdata_column_t *)); + } + rdata_column_t *new_column = calloc(1, sizeof(rdata_column_t)); + + new_column->index = writer->columns_count++; + + writer->columns[new_column->index] = new_column; + + new_column->type = type; + + if (name) { + snprintf(new_column->name, sizeof(new_column->name), "%s", name); + } + + return new_column; +} + +rdata_column_t *rdata_get_column(rdata_writer_t *writer, int32_t j) { + return writer->columns[j]; +} + +rdata_error_t rdata_column_set_label( + rdata_column_t *column, + const char *label +) { + snprintf(column->label, sizeof(column->label), "%s", label); + return RDATA_OK; +} + +rdata_error_t rdata_column_add_factor( + rdata_column_t *column, + const char *factor +) { + if (column->type != RDATA_TYPE_INT32) + return RDATA_ERROR_FACTOR; + + char *factor_copy = malloc(strlen(factor)+1); + strcpy(factor_copy, factor); // NOLINT + + column->factor_count++; + column->factor = realloc( + 
column->factor, + sizeof(char *) * column->factor_count); + column->factor[column->factor_count-1] = factor_copy; + + return RDATA_OK; +} + +static rdata_error_t rdata_write_bytes( + rdata_writer_t *writer, + const void *data, size_t len +) { + size_t bytes_written = writer->data_writer(data, len, writer->user_ctx); + if (bytes_written < len) { + return RDATA_ERROR_WRITE; + } + writer->bytes_written += bytes_written; + return RDATA_OK; +} + +static rdata_error_t rdata_write_integer( + rdata_writer_t *writer, + int32_t val +) { + if (writer->bswap) { + val = byteswap4(val); + } + return rdata_write_bytes(writer, &val, sizeof(val)); +} + +static rdata_error_t rdata_write_double(rdata_writer_t *writer, double val) { + if (writer->bswap) { + val = byteswap_double(val); + } + return rdata_write_bytes(writer, &val, sizeof(val)); +} + +static rdata_error_t rdata_write_header( + rdata_writer_t *writer, + int type, + int flags +) { + rdata_sexptype_header_t header; + memset(&header, 0, sizeof(header)); + + header.type = type; + header.object = !!(flags & R_OBJECT); + header.tag = !!(flags & R_TAG); + header.attributes = !!(flags & R_ATTRIBUTES); + + uint32_t sexp_int; + + memcpy(&sexp_int, &header, sizeof(header)); + + return rdata_write_integer(writer, sexp_int); +} + +static rdata_error_t rdata_write_string( + rdata_writer_t *writer, + const char *string +) { + rdata_error_t retval = RDATA_OK; + + retval = rdata_write_header(writer, RDATA_SEXPTYPE_CHARACTER_STRING, 0); + if (retval != RDATA_OK) + goto cleanup; + + ssize_t len = string ? 
(ssize_t)strlen(string) : -1; + + retval = rdata_write_integer(writer, len); + if (retval != RDATA_OK) + goto cleanup; + + if (len > 0) + return rdata_write_bytes(writer, string, len); + +cleanup: + return retval; +} + +static rdata_error_t rdata_write_pairlist_key( + rdata_writer_t *writer, + const char *key +) { + rdata_error_t retval = RDATA_OK; + ck_hash_table_t *atom_table = (ck_hash_table_t *)writer->atom_table; + uint64_t ref = (uint64_t)ck_str_hash_lookup(key, atom_table); + if (ref == 0) { + ck_str_hash_insert(key, (void *)(atom_table->count + 1), atom_table); + + retval = rdata_write_integer(writer, 1); + if (retval != RDATA_OK) + goto cleanup; + + retval = rdata_write_string(writer, key); + } else { + retval = rdata_write_integer(writer, (ref << 8) | 0xFF); + } + +cleanup: + return retval; +} + +static rdata_error_t rdata_write_pairlist_header( + rdata_writer_t *writer, + const char *key +) { + rdata_error_t retval = RDATA_OK; + + retval = rdata_write_header(writer, RDATA_SEXPTYPE_PAIRLIST, R_TAG); + if (retval != RDATA_OK) + goto cleanup; + + retval = rdata_write_pairlist_key(writer, key); + if (retval != RDATA_OK) + goto cleanup; + +cleanup: + return retval; +} + +static rdata_error_t rdata_write_attributed_vector_header( + rdata_writer_t *writer, int type, + int32_t size +) { + rdata_error_t retval = RDATA_OK; + + retval = rdata_write_header(writer, type, R_OBJECT | R_ATTRIBUTES); + if (retval != RDATA_OK) + goto cleanup; + + retval = rdata_write_integer(writer, size); + if (retval != RDATA_OK) + goto cleanup; + +cleanup: + return retval; +} + +static rdata_error_t rdata_write_simple_vector_header( + rdata_writer_t *writer, + int type, + int32_t size +) { + rdata_error_t retval = RDATA_OK; + + retval = rdata_write_header(writer, type, 0); + if (retval != RDATA_OK) + goto cleanup; + + retval = rdata_write_integer(writer, size); + if (retval != RDATA_OK) + goto cleanup; + +cleanup: + return retval; +} + +static rdata_error_t rdata_write_class_pairlist( 
+ rdata_writer_t *writer, + const char *class +) { + rdata_error_t retval = RDATA_OK; + + retval = rdata_write_pairlist_header(writer, "class"); + if (retval != RDATA_OK) + goto cleanup; + + retval = rdata_write_simple_vector_header( + writer, + RDATA_SEXPTYPE_CHARACTER_VECTOR, + 1); + if (retval != RDATA_OK) + goto cleanup; + + retval = rdata_write_string(writer, class); + if (retval != RDATA_OK) + goto cleanup; + +cleanup: + return retval; +} + +rdata_error_t rdata_begin_file( + rdata_writer_t *writer, + void *user_ctx +) { + rdata_error_t retval = RDATA_OK; + + writer->user_ctx = user_ctx; + + if (writer->file_format == RDATA_WORKSPACE) { + retval = rdata_write_bytes(writer, "RDX2\n", 5); + if (retval != RDATA_OK) + goto cleanup; + } + + rdata_v2_header_t v2_header; + memcpy(v2_header.header, "X\n", sizeof("X\n")-1); + v2_header.format_version = 2; + v2_header.reader_version = 131840; + v2_header.writer_version = 131840; + + if (writer->bswap) { + v2_header.format_version = byteswap4(v2_header.format_version); + v2_header.reader_version = byteswap4(v2_header.reader_version); + v2_header.writer_version = byteswap4(v2_header.writer_version); + } + + retval = rdata_write_bytes(writer, &v2_header, sizeof(v2_header)); + if (retval != RDATA_OK) + goto cleanup; + +cleanup: + return retval; +} + +rdata_error_t rdata_begin_table( + rdata_writer_t *writer, + const char *variable_name +) { + rdata_error_t retval = RDATA_OK; + + if (writer->file_format == RDATA_WORKSPACE) { + retval = rdata_write_pairlist_header(writer, variable_name); + if (retval != RDATA_OK) + goto cleanup; + } + + retval = rdata_write_attributed_vector_header( + writer, + RDATA_SEXPTYPE_GENERIC_VECTOR, + writer->columns_count); + if (retval != RDATA_OK) + goto cleanup; + +cleanup: + return retval; +} + +static rdata_error_t rdata_begin_factor_column( + rdata_writer_t *writer, + rdata_column_t *column, + int32_t row_count +) { + return rdata_write_attributed_vector_header( + writer, + 
RDATA_SEXPTYPE_INTEGER_VECTOR, + row_count); +} + +static rdata_error_t rdata_end_factor_column( + rdata_writer_t *writer, + rdata_column_t *column +) { + int i; + + rdata_error_t retval = RDATA_OK; + + retval = rdata_write_pairlist_header(writer, "levels"); + if (retval != RDATA_OK) + goto cleanup; + + retval = rdata_write_simple_vector_header(writer, + RDATA_SEXPTYPE_CHARACTER_VECTOR, column->factor_count); + if (retval != RDATA_OK) + goto cleanup; + + for (i=0; i < column->factor_count; i++) { + retval = rdata_write_string(writer, column->factor[i]); + if (retval != RDATA_OK) + goto cleanup; + } + + retval = rdata_write_class_pairlist(writer, "factor"); + if (retval != RDATA_OK) + goto cleanup; + + retval = rdata_write_header(writer, RDATA_PSEUDO_SXP_NIL, 0); + if (retval != RDATA_OK) + goto cleanup; + +cleanup: + return retval; +} + +static rdata_error_t rdata_begin_real_column( + rdata_writer_t *writer, + rdata_column_t *column, + int32_t row_count +) { + return rdata_write_simple_vector_header( + writer, + RDATA_SEXPTYPE_REAL_VECTOR, + row_count); +} + +static rdata_error_t rdata_end_real_column( + rdata_writer_t *writer, + rdata_column_t *column +) { + return RDATA_OK; +} + +static rdata_error_t rdata_begin_timestamp_column( + rdata_writer_t *writer, + rdata_column_t *column, + int32_t row_count +) { + return rdata_write_attributed_vector_header( + writer, + RDATA_SEXPTYPE_REAL_VECTOR, + row_count); +} + +static rdata_error_t rdata_end_timestamp_column( + rdata_writer_t *writer, + rdata_column_t *column +) { + rdata_error_t retval = RDATA_OK; + + retval = rdata_write_class_pairlist(writer, "POSIXct"); + if (retval != RDATA_OK) + goto cleanup; + + retval = rdata_write_header(writer, RDATA_PSEUDO_SXP_NIL, 0); + if (retval != RDATA_OK) + goto cleanup; + +cleanup: + return retval; +} + +static rdata_error_t rdata_begin_date_column( + rdata_writer_t *writer, + rdata_column_t *column, + int32_t row_count +) { + return rdata_write_attributed_vector_header( + 
writer, + RDATA_SEXPTYPE_REAL_VECTOR, + row_count); +} + +static rdata_error_t rdata_end_date_column( + rdata_writer_t *writer, + rdata_column_t *column +) { + rdata_error_t retval = RDATA_OK; + + retval = rdata_write_class_pairlist(writer, "Date"); + if (retval != RDATA_OK) + goto cleanup; + + retval = rdata_write_header(writer, RDATA_PSEUDO_SXP_NIL, 0); + if (retval != RDATA_OK) + goto cleanup; + +cleanup: + return retval; +} + +static rdata_error_t rdata_begin_integer_column( + rdata_writer_t *writer, + rdata_column_t *column, + int32_t row_count +) { + return rdata_write_simple_vector_header( + writer, + RDATA_SEXPTYPE_INTEGER_VECTOR, + row_count); +} + +static rdata_error_t rdata_end_integer_column( + rdata_writer_t *writer, + rdata_column_t *column +) { + return RDATA_OK; +} + +static rdata_error_t rdata_begin_logical_column( + rdata_writer_t *writer, + rdata_column_t *column, + int32_t row_count +) { + return rdata_write_simple_vector_header( + writer, + RDATA_SEXPTYPE_LOGICAL_VECTOR, + row_count); +} + +static rdata_error_t rdata_end_logical_column( + rdata_writer_t *writer, + rdata_column_t *column +) { + return RDATA_OK; +} + +static rdata_error_t rdata_begin_string_column( + rdata_writer_t *writer, + rdata_column_t *column, + int32_t row_count +) { + return rdata_write_simple_vector_header( + writer, + RDATA_SEXPTYPE_CHARACTER_VECTOR, + row_count); +} + +static rdata_error_t rdata_end_string_column( + rdata_writer_t *writer, + rdata_column_t *column +) { + return RDATA_OK; +} + +rdata_error_t rdata_begin_column( + rdata_writer_t *writer, + rdata_column_t *column, + int32_t row_count +) { + rdata_type_t type = column->type; + + if (type == RDATA_TYPE_INT32) { + if (column->factor_count) + return rdata_begin_factor_column(writer, column, row_count); + return rdata_begin_integer_column(writer, column, row_count); + } + if (type == RDATA_TYPE_REAL) + return rdata_begin_real_column(writer, column, row_count); + if (type == RDATA_TYPE_TIMESTAMP) + return 
rdata_begin_timestamp_column(writer, column, row_count); + if (type == RDATA_TYPE_DATE) + return rdata_begin_date_column(writer, column, row_count); + if (type == RDATA_TYPE_LOGICAL) + return rdata_begin_logical_column(writer, column, row_count); + if (type == RDATA_TYPE_STRING) + return rdata_begin_string_column(writer, column, row_count); + + return RDATA_OK; +} + +rdata_error_t rdata_append_real_value( + rdata_writer_t *writer, + double value +) { + return rdata_write_double(writer, value); +} + +rdata_error_t rdata_append_int32_value( + rdata_writer_t *writer, + int32_t value +) { + return rdata_write_integer(writer, value); +} + +rdata_error_t rdata_append_timestamp_value( + rdata_writer_t *writer, + time_t value +) { + return rdata_write_double(writer, value); +} + +rdata_error_t rdata_append_date_value( + rdata_writer_t *writer, + struct tm *value +) { + return rdata_write_double(writer, timegm(value) / 86400); +} + +rdata_error_t rdata_append_logical_value( + rdata_writer_t *writer, + int value +) { + if (value < 0) + return rdata_write_integer(writer, INT32_MIN); + + return rdata_write_integer(writer, (value > 0)); +} + +rdata_error_t rdata_append_string_value( + rdata_writer_t *writer, + const char *value +) { + return rdata_write_string(writer, value); +} + +rdata_error_t rdata_end_column( + rdata_writer_t *writer, + rdata_column_t *column +) { + rdata_type_t type = column->type; + + if (type == RDATA_TYPE_INT32) { + if (column->factor_count) + return rdata_end_factor_column(writer, column); + return rdata_end_integer_column(writer, column); + } + if (type == RDATA_TYPE_REAL) + return rdata_end_real_column(writer, column); + if (type == RDATA_TYPE_TIMESTAMP) + return rdata_end_timestamp_column(writer, column); + if (type == RDATA_TYPE_DATE) + return rdata_end_date_column(writer, column); + if (type == RDATA_TYPE_LOGICAL) + return rdata_end_logical_column(writer, column); + if (type == RDATA_TYPE_STRING) + return rdata_end_string_column(writer, column); + 
+ return RDATA_OK; +} + +rdata_error_t rdata_end_table( + rdata_writer_t *writer, + int32_t row_count, + const char *datalabel +) { + int i; + rdata_error_t retval = RDATA_OK; + + retval = rdata_write_pairlist_header(writer, "datalabel"); + if (retval != RDATA_OK) + goto cleanup; + + retval = rdata_write_simple_vector_header( + writer, + RDATA_SEXPTYPE_CHARACTER_VECTOR, + 1); + if (retval != RDATA_OK) + goto cleanup; + + retval = rdata_write_string(writer, datalabel); + if (retval != RDATA_OK) + goto cleanup; + + retval = rdata_write_pairlist_header(writer, "names"); + if (retval != RDATA_OK) + goto cleanup; + + retval = rdata_write_simple_vector_header(writer, + RDATA_SEXPTYPE_CHARACTER_VECTOR, writer->columns_count); + if (retval != RDATA_OK) + goto cleanup; + + for (i=0; i < writer->columns_count; i++) { + retval = rdata_write_string(writer, writer->columns[i]->name); + if (retval != RDATA_OK) + goto cleanup; + } + + retval = rdata_write_pairlist_header(writer, "var.labels"); + if (retval != RDATA_OK) + goto cleanup; + + retval = rdata_write_simple_vector_header(writer, + RDATA_SEXPTYPE_CHARACTER_VECTOR, writer->columns_count); + if (retval != RDATA_OK) + goto cleanup; + + for (i=0; i < writer->columns_count; i++) { + retval = rdata_write_string(writer, writer->columns[i]->label); + if (retval != RDATA_OK) + goto cleanup; + } + + retval = rdata_write_class_pairlist(writer, "data.frame"); + if (retval != RDATA_OK) + goto cleanup; + + if (row_count > 0) { + retval = rdata_write_pairlist_header(writer, "row.names"); + if (retval != RDATA_OK) + goto cleanup; + + retval = rdata_write_simple_vector_header(writer, + RDATA_SEXPTYPE_CHARACTER_VECTOR, row_count); + if (retval != RDATA_OK) + goto cleanup; + + char buf[128]; + for (i=0; i < row_count; i++) { + snprintf(buf, sizeof(buf), "%d", i+1); + retval = rdata_write_string(writer, buf); + if (retval != RDATA_OK) + goto cleanup; + } + } + + retval = rdata_write_header(writer, RDATA_PSEUDO_SXP_NIL, 0); + if (retval != 
RDATA_OK) + goto cleanup; + +cleanup: + return retval; +} + +rdata_error_t rdata_end_file(rdata_writer_t *writer) { + if (writer->file_format == RDATA_WORKSPACE) + return rdata_write_header(writer, RDATA_PSEUDO_SXP_NIL, 0); + + return RDATA_OK; +} diff --git a/pandas/_libs/src/librdata/unix_iconv.h b/pandas/_libs/src/librdata/unix_iconv.h new file mode 100644 index 0000000000000..58ee38c36dd9c --- /dev/null +++ b/pandas/_libs/src/librdata/unix_iconv.h @@ -0,0 +1,60 @@ +/* Copyright (C) 1997-2020 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <https://www.gnu.org/licenses/>. */ + +#ifndef PANDAS__LIBS_SRC_LIBRDATA_UNIX_ICONV_H_ +#define PANDAS__LIBS_SRC_LIBRDATA_UNIX_ICONV_H_ + +#ifndef _ICONV_H +#define _ICONV_H 1 + +#include <features.h> +#define __need_size_t +#include <stddef.h> + + +__BEGIN_DECLS + +/* Identifier for conversion method from one codeset to another. */ +typedef void *iconv_t; + + +/* Allocate descriptor for code conversion from codeset FROMCODE to + codeset TOCODE. + + This function is a possible cancellation point and therefore not + marked with __THROW. 
*/ +extern iconv_t iconv_open(const char *__tocode, const char *__fromcode); + +/* Convert at most *INBYTESLEFT bytes from *INBUF according to the + code conversion algorithm specified by CD and place up to + *OUTBYTESLEFT bytes in buffer at *OUTBUF. */ +extern size_t iconv(iconv_t __cd, char **__restrict __inbuf, + size_t *__restrict __inbytesleft, + char **__restrict __outbuf, + size_t *__restrict __outbytesleft); + +/* Free resources allocated for descriptor CD for code conversion. + + This function is a possible cancellation point and therefore not + marked with __THROW. */ +extern int iconv_close(iconv_t __cd); + +__END_DECLS + +#endif /* iconv.h */ + +#endif // PANDAS__LIBS_SRC_LIBRDATA_UNIX_ICONV_H_ diff --git a/pandas/_libs/src/librdata/win_iconv.c b/pandas/_libs/src/librdata/win_iconv.c new file mode 100644 index 0000000000000..dd5ddc5882abd --- /dev/null +++ b/pandas/_libs/src/librdata/win_iconv.c @@ -0,0 +1,2236 @@ +/* + +win-iconv - iconv implementation using Win32 API to convert. +Written in 2009-2016 by Yukihiro Nakadaira <https://github.com/ynkdir> +and contributors to win-iconv <https://github.com/win-iconv/win-iconv> + +To the extent possible under law, the author(s) have dedicated all copyright +and related and neighboring rights to this software to the public domain +worldwide. This software is distributed without any warranty. + +You should have received a copy of the CC0 Public Domain Dedication along with +this software. If not, see http://creativecommons.org/publicdomain/zero/1.0/. 
+ + */ + +/* for WC_NO_BEST_FIT_CHARS */ +#ifndef WINVER +# define WINVER 0x0500 +#endif + +#define STRICT +#include "win_iconv.h" +#include <windows.h> +#include <errno.h> +#include <string.h> +#include <stdlib.h> + +#ifdef __GNUC__ +#define UNUSED __attribute__((unused)) +#else +#define UNUSED +#endif + +/* WORKAROUND: */ +#ifndef UNDER_CE +#define GetProcAddressA GetProcAddress +#endif + +#if 0 +# define MAKE_EXE +# define MAKE_DLL +# define USE_LIBICONV_DLL +#endif + +#if !defined(DEFAULT_LIBICONV_DLL) +# define DEFAULT_LIBICONV_DLL "" +#endif + +#define MB_CHAR_MAX 16 + +#define UNICODE_MODE_BOM_DONE 1 +#define UNICODE_MODE_SWAPPED 2 + +#define FLAG_USE_BOM 1 +#define FLAG_TRANSLIT 2 +#define FLAG_IGNORE 4 + +typedef unsigned char uchar; +typedef unsigned short ushort; +typedef unsigned int uint; + +typedef void* iconv_t; + +iconv_t iconv_open(const char *tocode, const char *fromcode); +int iconv_close(iconv_t cd); +size_t iconv( + iconv_t cd, + const char **inbuf, + size_t *inbytesleft, + char **outbuf, + size_t *outbytesleft); + +/* libiconv interface for vim */ +#if defined(MAKE_DLL) +int iconvctl(iconv_t cd, int request, void* argument) { + /* not supported */ + return 0; +} +#endif + +typedef struct compat_t compat_t; +typedef struct csconv_t csconv_t; +typedef struct rec_iconv_t rec_iconv_t; + +typedef iconv_t (*f_iconv_open)(const char *tocode, const char *fromcode); +typedef int (*f_iconv_close)(iconv_t cd); +typedef size_t (*f_iconv)( + iconv_t cd, + const char **inbuf, + size_t *inbytesleft, + char **outbuf, + size_t *outbytesleft); +typedef int* (*f_errno)(void); +typedef int (*f_mbtowc)( + csconv_t *cv, + const uchar *buf, + int bufsize, + ushort *wbuf, + int *wbufsize); +typedef int (*f_wctomb)( + csconv_t *cv, + ushort *wbuf, + int wbufsize, + uchar *buf, + int bufsize); +typedef int (*f_mblen)(csconv_t *cv, const uchar *buf, int bufsize); +typedef int (*f_flush)(csconv_t *cv, uchar *buf, int bufsize); + +#define COMPAT_IN 1 +#define COMPAT_OUT 2 
+ +/* unicode mapping for compatibility with other conversion table. */ +struct compat_t { + uint in; + uint out; + uint flag; +}; + +struct csconv_t { + int codepage; + int flags; + f_mbtowc mbtowc; + f_wctomb wctomb; + f_mblen mblen; + f_flush flush; + DWORD mode; + compat_t *compat; +}; + +struct rec_iconv_t { + iconv_t cd; + f_iconv_close iconv_close; + f_iconv iconv; + f_errno _errno; + csconv_t from; + csconv_t to; +#if defined(USE_LIBICONV_DLL) + HMODULE hlibiconv; +#endif +}; + +static int win_iconv_open( + rec_iconv_t *cd, + const char *tocode, + const char *fromcode); +static int win_iconv_close(iconv_t cd); +static size_t win_iconv( + iconv_t cd, + const char **inbuf, + size_t *inbytesleft, + char **outbuf, + size_t *outbytesleft); + +static int load_mlang(void); +static int make_csconv(const char *name, csconv_t *cv); +static int name_to_codepage(const char *name); +static uint utf16_to_ucs4(const ushort *wbuf); +static void ucs4_to_utf16(uint wc, ushort *wbuf, int *wbufsize); +static int mbtowc_flags(int codepage); +static int must_use_null_useddefaultchar(int codepage); +static char *strrstr(const char *str, const char *token); +static char *xstrndup(const char *s, size_t n); +static int seterror(int err); + +#if defined(USE_LIBICONV_DLL) +static int libiconv_iconv_open( + rec_iconv_t *cd, + const char *tocode, + const char *fromcode); +static PVOID MyImageDirectoryEntryToData( + LPVOID Base, + BOOLEAN MappedAsImage, + USHORT DirectoryEntry, + PULONG Size); +static FARPROC find_imported_function( + HMODULE hModule, + const char *funcname); + +static HMODULE hwiniconv; +#endif + +static int sbcs_mblen(csconv_t *cv, const uchar *buf, int bufsize); +static int dbcs_mblen(csconv_t *cv, const uchar *buf, int bufsize); +static int mbcs_mblen(csconv_t *cv, const uchar *buf, int bufsize); +static int utf8_mblen(csconv_t *cv, const uchar *buf, int bufsize); +static int eucjp_mblen(csconv_t *cv, const uchar *buf, int bufsize); + +static int kernel_mbtowc( + 
csconv_t *cv, + const uchar *buf, + int bufsize, + ushort *wbuf, + int *wbufsize); +static int kernel_wctomb( + csconv_t *cv, + ushort *wbuf, + int wbufsize, + uchar *buf, + int bufsize); +static int mlang_mbtowc( + csconv_t *cv, + const uchar *buf, + int bufsize, + ushort *wbuf, + int *wbufsize); +static int mlang_wctomb( + csconv_t *cv, + ushort *wbuf, + int wbufsize, + uchar *buf, + int bufsize); +static int utf16_mbtowc( + csconv_t *cv, + const uchar *buf, + int bufsize, + ushort *wbuf, + int *wbufsize); +static int utf16_wctomb( + csconv_t *cv, + ushort *wbuf, + int wbufsize, + uchar *buf, + int bufsize); +static int utf32_mbtowc( + csconv_t *cv, + const uchar *buf, + int bufsize, + ushort *wbuf, + int *wbufsize); +static int utf32_wctomb( + csconv_t *cv, + ushort *wbuf, + int wbufsize, + uchar *buf, + int bufsize); +static int iso2022jp_mbtowc( + csconv_t *cv, + const uchar *buf, + int bufsize, + ushort *wbuf, + int *wbufsize); +static int iso2022jp_wctomb( + csconv_t *cv, + ushort *wbuf, + int wbufsize, + uchar *buf, + int bufsize); +static int iso2022jp_flush( + csconv_t *cv, + uchar *buf, + int bufsize); + +static struct { + int codepage; + const char *name; +} codepage_alias[] = { + {65001, "CP65001"}, + {65001, "UTF8"}, + {65001, "UTF-8"}, + + {1200, "CP1200"}, + {1200, "UTF16LE"}, + {1200, "UTF-16LE"}, + {1200, "UCS2LE"}, + {1200, "UCS-2LE"}, + {1200, "UCS-2-INTERNAL"}, + + {1201, "CP1201"}, + {1201, "UTF16BE"}, + {1201, "UTF-16BE"}, + {1201, "UCS2BE"}, + {1201, "UCS-2BE"}, + {1201, "unicodeFFFE"}, + + {12000, "CP12000"}, + {12000, "UTF32LE"}, + {12000, "UTF-32LE"}, + {12000, "UCS4LE"}, + {12000, "UCS-4LE"}, + + {12001, "CP12001"}, + {12001, "UTF32BE"}, + {12001, "UTF-32BE"}, + {12001, "UCS4BE"}, + {12001, "UCS-4BE"}, + +#ifndef GLIB_COMPILATION + /* + * Default is big endian. + * See rfc2781 4.3 Interpreting text labelled as UTF-16. 
+ */ + {1201, "UTF16"}, + {1201, "UTF-16"}, + {1201, "UCS2"}, + {1201, "UCS-2"}, + {12001, "UTF32"}, + {12001, "UTF-32"}, + {12001, "UCS-4"}, + {12001, "UCS4"}, +#else + /* Default is little endian, because the platform is */ + {1200, "UTF16"}, + {1200, "UTF-16"}, + {1200, "UCS2"}, + {1200, "UCS-2"}, + {12000, "UTF32"}, + {12000, "UTF-32"}, + {12000, "UCS4"}, + {12000, "UCS-4"}, +#endif + + /* copy from libiconv `iconv -l` */ + /* !IsValidCodePage(367) */ + {20127, "ANSI_X3.4-1968"}, + {20127, "ANSI_X3.4-1986"}, + {20127, "ASCII"}, + {20127, "CP367"}, + {20127, "IBM367"}, + {20127, "ISO-IR-6"}, + {20127, "ISO646-US"}, + {20127, "ISO_646.IRV:1991"}, + {20127, "US"}, + {20127, "US-ASCII"}, + {20127, "CSASCII"}, + + /* !IsValidCodePage(819) */ + {1252, "CP819"}, + {1252, "IBM819"}, + {28591, "ISO-8859-1"}, + {28591, "ISO-IR-100"}, + {28591, "ISO8859-1"}, + {28591, "ISO_8859-1"}, + {28591, "ISO_8859-1:1987"}, + {28591, "L1"}, + {28591, "LATIN1"}, + {28591, "CSISOLATIN1"}, + + {1250, "CP1250"}, + {1250, "MS-EE"}, + {1250, "WINDOWS-1250"}, + + {1251, "CP1251"}, + {1251, "MS-CYRL"}, + {1251, "WINDOWS-1251"}, + + {1252, "CP1252"}, + {1252, "MS-ANSI"}, + {1252, "WINDOWS-1252"}, + + {1253, "CP1253"}, + {1253, "MS-GREEK"}, + {1253, "WINDOWS-1253"}, + + {1254, "CP1254"}, + {1254, "MS-TURK"}, + {1254, "WINDOWS-1254"}, + + {1255, "CP1255"}, + {1255, "MS-HEBR"}, + {1255, "WINDOWS-1255"}, + + {1256, "CP1256"}, + {1256, "MS-ARAB"}, + {1256, "WINDOWS-1256"}, + + {1257, "CP1257"}, + {1257, "WINBALTRIM"}, + {1257, "WINDOWS-1257"}, + + {1258, "CP1258"}, + {1258, "WINDOWS-1258"}, + + {850, "850"}, + {850, "CP850"}, + {850, "IBM850"}, + {850, "CSPC850MULTILINGUAL"}, + + /* !IsValidCodePage(862) */ + {862, "862"}, + {862, "CP862"}, + {862, "IBM862"}, + {862, "CSPC862LATINHEBREW"}, + + {866, "866"}, + {866, "CP866"}, + {866, "IBM866"}, + {866, "CSIBM866"}, + + /* !IsValidCodePage(154) */ + {154, "CP154"}, + {154, "CYRILLIC-ASIAN"}, + {154, "PT154"}, + {154, "PTCP154"}, + {154, 
"CSPTCP154"}, + + /* !IsValidCodePage(1133) */ + {1133, "CP1133"}, + {1133, "IBM-CP1133"}, + + {874, "CP874"}, + {874, "WINDOWS-874"}, + + /* !IsValidCodePage(51932) */ + {51932, "CP51932"}, + {51932, "MS51932"}, + {51932, "WINDOWS-51932"}, + {51932, "EUC-JP"}, + + {932, "CP932"}, + {932, "MS932"}, + {932, "SHIFFT_JIS"}, + {932, "SHIFFT_JIS-MS"}, + {932, "SJIS"}, + {932, "SJIS-MS"}, + {932, "SJIS-OPEN"}, + {932, "SJIS-WIN"}, + {932, "WINDOWS-31J"}, + {932, "WINDOWS-932"}, + {932, "CSWINDOWS31J"}, + + {50221, "CP50221"}, + {50221, "ISO-2022-JP"}, + {50221, "ISO-2022-JP-MS"}, + {50221, "ISO2022-JP"}, + {50221, "ISO2022-JP-MS"}, + {50221, "MS50221"}, + {50221, "WINDOWS-50221"}, + + {936, "CP936"}, + {936, "GBK"}, + {936, "MS936"}, + {936, "WINDOWS-936"}, + + {950, "CP950"}, + {950, "BIG5"}, + {950, "BIG5HKSCS"}, + {950, "BIG5-HKSCS"}, + + {949, "CP949"}, + {949, "UHC"}, + {949, "EUC-KR"}, + + {1361, "CP1361"}, + {1361, "JOHAB"}, + + {437, "437"}, + {437, "CP437"}, + {437, "IBM437"}, + {437, "CSPC8CODEPAGE437"}, + + {737, "CP737"}, + + {775, "CP775"}, + {775, "IBM775"}, + {775, "CSPC775BALTIC"}, + + {852, "852"}, + {852, "CP852"}, + {852, "IBM852"}, + {852, "CSPCP852"}, + + /* !IsValidCodePage(853) */ + {853, "CP853"}, + + {855, "855"}, + {855, "CP855"}, + {855, "IBM855"}, + {855, "CSIBM855"}, + + {857, "857"}, + {857, "CP857"}, + {857, "IBM857"}, + {857, "CSIBM857"}, + + /* !IsValidCodePage(858) */ + {858, "CP858"}, + + {860, "860"}, + {860, "CP860"}, + {860, "IBM860"}, + {860, "CSIBM860"}, + + {861, "861"}, + {861, "CP-IS"}, + {861, "CP861"}, + {861, "IBM861"}, + {861, "CSIBM861"}, + + {863, "863"}, + {863, "CP863"}, + {863, "IBM863"}, + {863, "CSIBM863"}, + + {864, "CP864"}, + {864, "IBM864"}, + {864, "CSIBM864"}, + + {865, "865"}, + {865, "CP865"}, + {865, "IBM865"}, + {865, "CSIBM865"}, + + {869, "869"}, + {869, "CP-GR"}, + {869, "CP869"}, + {869, "IBM869"}, + {869, "CSIBM869"}, + + /* !IsValidCodePage(1152) */ + {1125, "CP1125"}, + + /* + * Code Page Identifiers 
+ * http://msdn2.microsoft.com/en-us/library/ms776446.aspx + */ + {37, "IBM037"}, /* IBM EBCDIC US-Canada */ + {437, "IBM437"}, /* OEM United States */ + {500, "IBM500"}, /* IBM EBCDIC International */ + {708, "ASMO-708"}, /* Arabic (ASMO 708) */ + /* 709 Arabic (ASMO-449+, BCON V4) */ + /* 710 Arabic - Transparent Arabic */ + {720, "DOS-720"}, /* Arabic (Transparent ASMO); Arabic (DOS) */ + {737, "ibm737"}, /* OEM Greek (formerly 437G); Greek (DOS) */ + {775, "ibm775"}, /* OEM Baltic; Baltic (DOS) */ + {850, "ibm850"}, /* OEM Multilingual Latin 1; Western European (DOS) */ + {852, "ibm852"}, /* OEM Latin 2; Central European (DOS) */ + {855, "IBM855"}, /* OEM Cyrillic (primarily Russian) */ + {857, "ibm857"}, /* OEM Turkish; Turkish (DOS) */ + {858, "IBM00858"}, /* OEM Multilingual Latin 1 + Euro symbol */ + {860, "IBM860"}, /* OEM Portuguese; Portuguese (DOS) */ + {861, "ibm861"}, /* OEM Icelandic; Icelandic (DOS) */ + {862, "DOS-862"}, /* OEM Hebrew; Hebrew (DOS) */ + {863, "IBM863"}, /* OEM French Canadian; French Canadian (DOS) */ + {864, "IBM864"}, /* OEM Arabic; Arabic (864) */ + {865, "IBM865"}, /* OEM Nordic; Nordic (DOS) */ + {866, "cp866"}, /* OEM Russian; Cyrillic (DOS) */ + {869, "ibm869"}, /* OEM Modern Greek; Greek, Modern (DOS) */ + /* + * IBM EBCDIC Multilingual/ROECE (Latin 2); + * IBM EBCDIC Multilingual Latin 2 + */ + {870, "IBM870"}, + /* ANSI/OEM Thai (same as 28605, ISO 8859-15); Thai (Windows) */ + {874, "windows-874"}, + {875, "cp875"}, /* IBM EBCDIC Greek Modern */ + {932, "shift_jis"}, /* ANSI/OEM Japanese; Japanese (Shift-JIS) */ + {932, "shift-jis"}, /* alternative name for it */ + /* + * ANSI/OEM Simplified Chinese (PRC, Singapore); + * Chinese Simplified (GB2312) + */ + {936, "gb2312"}, + {949, "ks_c_5601-1987"}, /* ANSI/OEM Korean (Unified Hangul Code) */ + /* + * ANSI/OEM Traditional Chinese (Taiwan; Hong Kong SAR, PRC); + * Chinese Traditional (Big5) + */ + {950, "big5"}, + /* + * ANSI/OEM Traditional Chinese (Hong Kong SAR); + * 
Chinese Traditional (Big5-HKSCS) + */ + {950, "big5hkscs"}, + {950, "big5-hkscs"}, /* alternative name for it */ + {1026, "IBM1026"}, /* IBM EBCDIC Turkish (Latin 5) */ + {1047, "IBM01047"}, /* IBM EBCDIC Latin 1/Open System */ + /* + * IBM EBCDIC US-Canada (037 + Euro symbol); + * IBM EBCDIC (US-Canada-Euro) + */ + {1140, "IBM01140"}, + /* + * IBM EBCDIC Germany (20273 + Euro symbol); + * IBM EBCDIC (Germany-Euro) + */ + {1141, "IBM01141"}, + /* + * IBM EBCDIC Denmark-Norway (20277 + Euro symbol); + * IBM EBCDIC (Denmark-Norway-Euro) + */ + {1142, "IBM01142"}, + /* + * IBM EBCDIC Finland-Sweden (20278 + Euro symbol); + * IBM EBCDIC (Finland-Sweden-Euro) + */ + {1143, "IBM01143"}, + /* IBM EBCDIC Italy (20280 + Euro symbol); IBM EBCDIC (Italy-Euro) */ + {1144, "IBM01144"}, + /* + * IBM EBCDIC Latin America-Spain (20284 + Euro symbol); + * IBM EBCDIC (Spain-Euro) + */ + {1145, "IBM01145"}, + /* + * IBM EBCDIC United Kingdom (20285 + Euro symbol); + * IBM EBCDIC (UK-Euro) + */ + {1146, "IBM01146"}, + /* + * IBM EBCDIC France (20297 + Euro symbol); + * IBM EBCDIC (France-Euro) + */ + {1147, "IBM01147"}, + /* + * IBM EBCDIC International (500 + Euro symbol); + * IBM EBCDIC (International-Euro) + */ + {1148, "IBM01148"}, + /* + * IBM EBCDIC Icelandic (20871 + Euro symbol); + * IBM EBCDIC (Icelandic-Euro) + */ + {1149, "IBM01149"}, + /* ANSI Central European; Central European (Windows) */ + {1250, "windows-1250"}, + {1251, "windows-1251"}, /* ANSI Cyrillic; Cyrillic (Windows) */ + {1252, "windows-1252"}, /* ANSI Latin 1; Western European (Windows) */ + {1253, "windows-1253"}, /* ANSI Greek; Greek (Windows) */ + {1254, "windows-1254"}, /* ANSI Turkish; Turkish (Windows) */ + {1255, "windows-1255"}, /* ANSI Hebrew; Hebrew (Windows) */ + {1256, "windows-1256"}, /* ANSI Arabic; Arabic (Windows) */ + {1257, "windows-1257"}, /* ANSI Baltic; Baltic (Windows) */ + {1258, "windows-1258"}, /* ANSI/OEM Vietnamese; Vietnamese (Windows) */ + {1361, "Johab"}, /* Korean (Johab) */ + 
{10000, "macintosh"}, /* MAC Roman; Western European (Mac) */ + {10001, "x-mac-japanese"}, /* Japanese (Mac) */ + /* MAC Traditional Chinese (Big5); Chinese Traditional (Mac) */ + {10002, "x-mac-chinesetrad"}, + {10003, "x-mac-korean"}, /* Korean (Mac) */ + {10004, "x-mac-arabic"}, /* Arabic (Mac) */ + {10005, "x-mac-hebrew"}, /* Hebrew (Mac) */ + {10006, "x-mac-greek"}, /* Greek (Mac) */ + {10007, "x-mac-cyrillic"}, /* Cyrillic (Mac) */ + /* MAC Simplified Chinese (GB 2312); Chinese Simplified (Mac) */ + {10008, "x-mac-chinesesimp"}, + {10010, "x-mac-romanian"}, /* Romanian (Mac) */ + {10017, "x-mac-ukrainian"}, /* Ukrainian (Mac) */ + {10021, "x-mac-thai"}, /* Thai (Mac) */ + {10029, "x-mac-ce"}, /* MAC Latin 2; Central European (Mac) */ + {10079, "x-mac-icelandic"}, /* Icelandic (Mac) */ + {10081, "x-mac-turkish"}, /* Turkish (Mac) */ + {10082, "x-mac-croatian"}, /* Croatian (Mac) */ + {20000, "x-Chinese_CNS"}, /* CNS Taiwan; Chinese Traditional (CNS) */ + {20001, "x-cp20001"}, /* TCA Taiwan */ + {20002, "x_Chinese-Eten"}, /* Eten Taiwan; Chinese Traditional (Eten) */ + {20003, "x-cp20003"}, /* IBM5550 Taiwan */ + {20004, "x-cp20004"}, /* TeleText Taiwan */ + {20005, "x-cp20005"}, /* Wang Taiwan */ + /* + * IA5 (IRV International Alphabet No. 
5, 7-bit); + * Western European (IA5) + */ + {20105, "x-IA5"}, + {20106, "x-IA5-German"}, /* IA5 German (7-bit) */ + {20107, "x-IA5-Swedish"}, /* IA5 Swedish (7-bit) */ + {20108, "x-IA5-Norwegian"}, /* IA5 Norwegian (7-bit) */ + {20127, "us-ascii"}, /* US-ASCII (7-bit) */ + {20261, "x-cp20261"}, /* T.61 */ + {20269, "x-cp20269"}, /* ISO 6937 Non-Spacing Accent */ + {20273, "IBM273"}, /* IBM EBCDIC Germany */ + {20277, "IBM277"}, /* IBM EBCDIC Denmark-Norway */ + {20278, "IBM278"}, /* IBM EBCDIC Finland-Sweden */ + {20280, "IBM280"}, /* IBM EBCDIC Italy */ + {20284, "IBM284"}, /* IBM EBCDIC Latin America-Spain */ + {20285, "IBM285"}, /* IBM EBCDIC United Kingdom */ + {20290, "IBM290"}, /* IBM EBCDIC Japanese Katakana Extended */ + {20297, "IBM297"}, /* IBM EBCDIC France */ + {20420, "IBM420"}, /* IBM EBCDIC Arabic */ + {20423, "IBM423"}, /* IBM EBCDIC Greek */ + {20424, "IBM424"}, /* IBM EBCDIC Hebrew */ + {20833, "x-EBCDIC-KoreanExtended"}, /* IBM EBCDIC Korean Extended */ + {20838, "IBM-Thai"}, /* IBM EBCDIC Thai */ + {20866, "koi8-r"}, /* Russian (KOI8-R); Cyrillic (KOI8-R) */ + {20871, "IBM871"}, /* IBM EBCDIC Icelandic */ + {20880, "IBM880"}, /* IBM EBCDIC Cyrillic Russian */ + {20905, "IBM905"}, /* IBM EBCDIC Turkish */ + /* IBM EBCDIC Latin 1/Open System (1047 + Euro symbol) */ + {20924, "IBM00924"}, + {20932, "EUC-JP"}, /* Japanese (JIS 0208-1990 and 0121-1990) */ + /* Simplified Chinese (GB2312); Chinese Simplified (GB2312-80) */ + {20936, "x-cp20936"}, + {20949, "x-cp20949"}, /* Korean Wansung */ + {21025, "cp1025"}, /* IBM EBCDIC Cyrillic Serbian-Bulgarian */ + /* 21027 (deprecated) */ + {21866, "koi8-u"}, /* Ukrainian (KOI8-U); Cyrillic (KOI8-U) */ + {28591, "iso-8859-1"}, /* ISO 8859-1 Latin 1; Western European (ISO) */ + {28591, "iso8859-1"}, /* ISO 8859-1 Latin 1; Western European (ISO) */ + {28591, "iso_8859-1"}, + {28591, "iso_8859_1"}, + /* ISO 8859-2 Central European; Central European (ISO) */ + {28592, "iso-8859-2"}, + /* ISO 8859-2 Central 
European; Central European (ISO) */ + {28592, "iso8859-2"}, + {28592, "iso_8859-2"}, + {28592, "iso_8859_2"}, + {28593, "iso-8859-3"}, /* ISO 8859-3 Latin 3 */ + {28593, "iso8859-3"}, /* ISO 8859-3 Latin 3 */ + {28593, "iso_8859-3"}, + {28593, "iso_8859_3"}, + {28594, "iso-8859-4"}, /* ISO 8859-4 Baltic */ + {28594, "iso8859-4"}, /* ISO 8859-4 Baltic */ + {28594, "iso_8859-4"}, + {28594, "iso_8859_4"}, + {28595, "iso-8859-5"}, /* ISO 8859-5 Cyrillic */ + {28595, "iso8859-5"}, /* ISO 8859-5 Cyrillic */ + {28595, "iso_8859-5"}, + {28595, "iso_8859_5"}, + {28596, "iso-8859-6"}, /* ISO 8859-6 Arabic */ + {28596, "iso8859-6"}, /* ISO 8859-6 Arabic */ + {28596, "iso_8859-6"}, + {28596, "iso_8859_6"}, + {28597, "iso-8859-7"}, /* ISO 8859-7 Greek */ + {28597, "iso8859-7"}, /* ISO 8859-7 Greek */ + {28597, "iso_8859-7"}, + {28597, "iso_8859_7"}, + {28598, "iso-8859-8"}, /* ISO 8859-8 Hebrew; Hebrew (ISO-Visual) */ + {28598, "iso8859-8"}, /* ISO 8859-8 Hebrew; Hebrew (ISO-Visual) */ + {28598, "iso_8859-8"}, + {28598, "iso_8859_8"}, + {28599, "iso-8859-9"}, /* ISO 8859-9 Turkish */ + {28599, "iso8859-9"}, /* ISO 8859-9 Turkish */ + {28599, "iso_8859-9"}, + {28599, "iso_8859_9"}, + {28603, "iso-8859-13"}, /* ISO 8859-13 Estonian */ + {28603, "iso8859-13"}, /* ISO 8859-13 Estonian */ + {28603, "iso_8859-13"}, + {28603, "iso_8859_13"}, + {28605, "iso-8859-15"}, /* ISO 8859-15 Latin 9 */ + {28605, "iso8859-15"}, /* ISO 8859-15 Latin 9 */ + {28605, "iso_8859-15"}, + {28605, "iso_8859_15"}, + {29001, "x-Europa"}, /* Europa 3 */ + {38598, "iso-8859-8-i"}, /* ISO 8859-8 Hebrew; Hebrew (ISO-Logical) */ + {38598, "iso8859-8-i"}, /* ISO 8859-8 Hebrew; Hebrew (ISO-Logical) */ + {38598, "iso_8859-8-i"}, + {38598, "iso_8859_8-i"}, + /* + * ISO 2022 Japanese with no halfwidth Katakana; + * Japanese (JIS) + */ + {50220, "iso-2022-jp"}, + /* + * ISO 2022 Japanese with halfwidth Katakana; + * Japanese (JIS-Allow 1 byte Kana) + */ + {50221, "csISO2022JP"}, + /* + * ISO 2022 Japanese JIS X 
0201-1989; + * Japanese (JIS-Allow 1 byte Kana - SO/SI) + */ + {50222, "iso-2022-jp"}, + {50225, "iso-2022-kr"}, /* ISO 2022 Korean */ + {50225, "iso2022-kr"}, /* ISO 2022 Korean */ + /* ISO 2022 Simplified Chinese; Chinese Simplified (ISO 2022) */ + {50227, "x-cp50227"}, + /* 50229 ISO 2022 Traditional Chinese */ + /* 50930 EBCDIC Japanese (Katakana) Extended */ + /* 50931 EBCDIC US-Canada and Japanese */ + /* 50933 EBCDIC Korean Extended and Korean */ + /* 50935 EBCDIC Simplified Chinese Extended and Simplified Chinese */ + /* 50936 EBCDIC Simplified Chinese */ + /* 50937 EBCDIC US-Canada and Traditional Chinese */ + /* 50939 EBCDIC Japanese (Latin) Extended and Japanese */ + {51932, "euc-jp"}, /* EUC Japanese */ + {51936, "EUC-CN"}, /* EUC Simplified Chinese; Chinese Simplified (EUC) */ + {51949, "euc-kr"}, /* EUC Korean */ + /* 51950 EUC Traditional Chinese */ + /* HZ-GB2312 Simplified Chinese; Chinese Simplified (HZ) */ + {52936, "hz-gb-2312"}, + /* + * Windows XP and later: GB18030 Simplified Chinese (4 byte); + * Chinese Simplified (GB18030) + */ + {54936, "GB18030"}, + {57002, "x-iscii-de"}, /* ISCII Devanagari */ + {57003, "x-iscii-be"}, /* ISCII Bengali */ + {57004, "x-iscii-ta"}, /* ISCII Tamil */ + {57005, "x-iscii-te"}, /* ISCII Telugu */ + {57006, "x-iscii-as"}, /* ISCII Assamese */ + {57007, "x-iscii-or"}, /* ISCII Oriya */ + {57008, "x-iscii-ka"}, /* ISCII Kannada */ + {57009, "x-iscii-ma"}, /* ISCII Malayalam */ + {57010, "x-iscii-gu"}, /* ISCII Gujarati */ + {57011, "x-iscii-pa"}, /* ISCII Punjabi */ + + {0, NULL} +}; + +/* + * SJIS SHIFTJIS table CP932 table + * ---- --------------------------- -------------------------------- + * 5C U+00A5 YEN SIGN U+005C REVERSE SOLIDUS + * 7E U+203E OVERLINE U+007E TILDE + * 815C U+2014 EM DASH U+2015 HORIZONTAL BAR + * 815F U+005C REVERSE SOLIDUS U+FF3C FULLWIDTH REVERSE SOLIDUS + * 8160 U+301C WAVE DASH U+FF5E FULLWIDTH TILDE + * 8161 U+2016 DOUBLE VERTICAL LINE U+2225 PARALLEL TO + * 817C U+2212 MINUS SIGN 
U+FF0D FULLWIDTH HYPHEN-MINUS + * 8191 U+00A2 CENT SIGN U+FFE0 FULLWIDTH CENT SIGN + * 8192 U+00A3 POUND SIGN U+FFE1 FULLWIDTH POUND SIGN + * 81CA U+00AC NOT SIGN U+FFE2 FULLWIDTH NOT SIGN + * + * EUC-JP and ISO-2022-JP should be compatible with CP932. + * + * Kernel and MLang have different Unicode mapping table. Make sure + * which API is used. + */ +static compat_t cp932_compat[] = { + {0x00A5, 0x005C, COMPAT_OUT}, + {0x203E, 0x007E, COMPAT_OUT}, + {0x2014, 0x2015, COMPAT_OUT}, + {0x301C, 0xFF5E, COMPAT_OUT}, + {0x2016, 0x2225, COMPAT_OUT}, + {0x2212, 0xFF0D, COMPAT_OUT}, + {0x00A2, 0xFFE0, COMPAT_OUT}, + {0x00A3, 0xFFE1, COMPAT_OUT}, + {0x00AC, 0xFFE2, COMPAT_OUT}, + {0, 0, 0} +}; + +static compat_t cp20932_compat[] = { + {0x00A5, 0x005C, COMPAT_OUT}, + {0x203E, 0x007E, COMPAT_OUT}, + {0x2014, 0x2015, COMPAT_OUT}, + {0xFF5E, 0x301C, COMPAT_OUT|COMPAT_IN}, + {0x2225, 0x2016, COMPAT_OUT|COMPAT_IN}, + {0xFF0D, 0x2212, COMPAT_OUT|COMPAT_IN}, + {0xFFE0, 0x00A2, COMPAT_OUT|COMPAT_IN}, + {0xFFE1, 0x00A3, COMPAT_OUT|COMPAT_IN}, + {0xFFE2, 0x00AC, COMPAT_OUT|COMPAT_IN}, + {0, 0, 0} +}; + +static compat_t *cp51932_compat = cp932_compat; + +/* cp20932_compat for kernel. cp932_compat for mlang. 
*/ +static compat_t *cp5022x_compat = cp932_compat; + +typedef HRESULT (WINAPI *CONVERTINETSTRING)( + LPDWORD lpdwMode, + DWORD dwSrcEncoding, + DWORD dwDstEncoding, + LPCSTR lpSrcStr, + LPINT lpnSrcSize, + LPBYTE lpDstStr, + LPINT lpnDstSize +); +typedef HRESULT (WINAPI *CONVERTINETMULTIBYTETOUNICODE)( + LPDWORD lpdwMode, + DWORD dwSrcEncoding, + LPCSTR lpSrcStr, + LPINT lpnMultiCharCount, + LPWSTR lpDstStr, + LPINT lpnWideCharCount +); +typedef HRESULT (WINAPI *CONVERTINETUNICODETOMULTIBYTE)( + LPDWORD lpdwMode, + DWORD dwEncoding, + LPCWSTR lpSrcStr, + LPINT lpnWideCharCount, + LPSTR lpDstStr, + LPINT lpnMultiCharCount +); +typedef HRESULT (WINAPI *ISCONVERTINETSTRINGAVAILABLE)( + DWORD dwSrcEncoding, + DWORD dwDstEncoding +); +typedef HRESULT (WINAPI *LCIDTORFC1766A)( + LCID Locale, + LPSTR pszRfc1766, + int nChar +); +typedef HRESULT (WINAPI *LCIDTORFC1766W)( + LCID Locale, + LPWSTR pszRfc1766, + int nChar +); +typedef HRESULT (WINAPI *RFC1766TOLCIDA)( + LCID *pLocale, + LPSTR pszRfc1766 +); +typedef HRESULT (WINAPI *RFC1766TOLCIDW)( + LCID *pLocale, + LPWSTR pszRfc1766 +); +static CONVERTINETSTRING ConvertINetString; +static CONVERTINETMULTIBYTETOUNICODE ConvertINetMultiByteToUnicode; +static CONVERTINETUNICODETOMULTIBYTE ConvertINetUnicodeToMultiByte; +static ISCONVERTINETSTRINGAVAILABLE IsConvertINetStringAvailable; +static LCIDTORFC1766A LcidToRfc1766A; +static RFC1766TOLCIDA Rfc1766ToLcidA; + +static int load_mlang(void) { + HMODULE h; + if (ConvertINetString != NULL) + return TRUE; + h = LoadLibrary(TEXT("mlang.dll")); + if (!h) + return FALSE; + ConvertINetString = + (CONVERTINETSTRING)GetProcAddressA(h, "ConvertINetString"); + ConvertINetMultiByteToUnicode = + (CONVERTINETMULTIBYTETOUNICODE)GetProcAddressA( + h, "ConvertINetMultiByteToUnicode"); + ConvertINetUnicodeToMultiByte = + (CONVERTINETUNICODETOMULTIBYTE)GetProcAddressA( + h, "ConvertINetUnicodeToMultiByte"); + IsConvertINetStringAvailable = + (ISCONVERTINETSTRINGAVAILABLE)GetProcAddressA( + h, 
"IsConvertINetStringAvailable"); + LcidToRfc1766A = + (LCIDTORFC1766A)GetProcAddressA(h, "LcidToRfc1766A"); + Rfc1766ToLcidA = + (RFC1766TOLCIDA)GetProcAddressA(h, "Rfc1766ToLcidA"); + return TRUE; +} + +iconv_t iconv_open(const char *tocode, const char *fromcode) { + rec_iconv_t *cd; + + cd = (rec_iconv_t *)calloc(1, sizeof(rec_iconv_t)); + if (cd == NULL) + return (iconv_t)(-1); + +#if defined(USE_LIBICONV_DLL) + errno = 0; + if (libiconv_iconv_open(cd, tocode, fromcode)) + return (iconv_t)cd; +#endif + + /* reset the errno to prevent reporting wrong error code. + * 0 for unsorted error. */ + errno = 0; + if (win_iconv_open(cd, tocode, fromcode)) + return (iconv_t)cd; + + free(cd); + + return (iconv_t)(-1); +} + +int iconv_close(iconv_t _cd) { + rec_iconv_t *cd = (rec_iconv_t *)_cd; + int r = cd->iconv_close(cd->cd); + int e = *(cd->_errno()); +#if defined(USE_LIBICONV_DLL) + if (cd->hlibiconv != NULL) + FreeLibrary(cd->hlibiconv); +#endif + free(cd); + errno = e; + return r; +} + +size_t iconv( + iconv_t _cd, + const char **inbuf, + size_t *inbytesleft, + char **outbuf, + size_t *outbytesleft +) { + rec_iconv_t *cd = (rec_iconv_t *)_cd; + size_t r = cd->iconv(cd->cd, inbuf, inbytesleft, outbuf, outbytesleft); + errno = *(cd->_errno()); + return r; +} + +static int win_iconv_open( + rec_iconv_t *cd, + const char *tocode, + const char *fromcode +) { + if (!make_csconv(fromcode, &cd->from) || !make_csconv(tocode, &cd->to)) + return FALSE; + cd->iconv_close = win_iconv_close; + cd->iconv = win_iconv; + cd->_errno = _errno; + cd->cd = (iconv_t)cd; + return TRUE; +} + +static int win_iconv_close(iconv_t cd UNUSED) { + return 0; +} + +static size_t win_iconv( + iconv_t _cd, + const char **inbuf, + size_t *inbytesleft, + char **outbuf, + size_t *outbytesleft +) { + rec_iconv_t *cd = (rec_iconv_t *)_cd; + ushort wbuf[MB_CHAR_MAX]; /* enough room for one character */ + int insize; + int outsize; + int wsize; + DWORD frommode; + DWORD tomode; + uint wc; + compat_t *cp; + 
int i; + + if (inbuf == NULL || *inbuf == NULL) { + if (outbuf != NULL && *outbuf != NULL && cd->to.flush != NULL) { + tomode = cd->to.mode; + outsize = cd->to.flush( + &cd->to, + (uchar *)*outbuf, + *outbytesleft); + if (outsize == -1) { + if ((cd->to.flags & FLAG_IGNORE) && errno != E2BIG) { + outsize = 0; + } else { + cd->to.mode = tomode; + return (size_t)(-1); + } + } + *outbuf += outsize; + *outbytesleft -= outsize; + } + cd->from.mode = 0; + cd->to.mode = 0; + return 0; + } + + while (*inbytesleft != 0) { + frommode = cd->from.mode; + tomode = cd->to.mode; + wsize = MB_CHAR_MAX; + + insize = cd->from.mbtowc( + &cd->from, + (const uchar *)*inbuf, + *inbytesleft, wbuf, &wsize); + if (insize == -1) { + if (cd->to.flags & FLAG_IGNORE) { + cd->from.mode = frommode; + insize = 1; + wsize = 0; + } else { + cd->from.mode = frommode; + return (size_t)(-1); + } + } + + if (wsize == 0) { + *inbuf += insize; + *inbytesleft -= insize; + continue; + } + + if (cd->from.compat != NULL) { + wc = utf16_to_ucs4(wbuf); + cp = cd->from.compat; + for (i = 0; cp[i].in != 0; ++i) { + if ((cp[i].flag & COMPAT_IN) && cp[i].out == wc) { + ucs4_to_utf16(cp[i].in, wbuf, &wsize); + break; + } + } + } + + if (cd->to.compat != NULL) { + wc = utf16_to_ucs4(wbuf); + cp = cd->to.compat; + for (i = 0; cp[i].in != 0; ++i) { + if ((cp[i].flag & COMPAT_OUT) && cp[i].in == wc) { + ucs4_to_utf16(cp[i].out, wbuf, &wsize); + break; + } + } + } + + outsize = cd->to.wctomb( + &cd->to, + wbuf, wsize, + (uchar *)*outbuf, + *outbytesleft); + if (outsize == -1) { + if ((cd->to.flags & FLAG_IGNORE) && errno != E2BIG) { + cd->to.mode = tomode; + outsize = 0; + } else { + cd->from.mode = frommode; + cd->to.mode = tomode; + return (size_t)(-1); + } + } + + *inbuf += insize; + *outbuf += outsize; + *inbytesleft -= insize; + *outbytesleft -= outsize; + } + + return 0; +} + +static int make_csconv(const char *_name, csconv_t *cv) { + CPINFO cpinfo; + int use_compat = TRUE; + int flag = 0; + char *name; + char *p; 
+ + name = xstrndup(_name, strlen(_name)); + if (name == NULL) + return FALSE; + + /* check for option "enc_name//opt1//opt2" */ + while ((p = strrstr(name, "//")) != NULL) { + if (_stricmp(p + 2, "nocompat") == 0) + use_compat = FALSE; + else if (_stricmp(p + 2, "translit") == 0) + flag |= FLAG_TRANSLIT; + else if (_stricmp(p + 2, "ignore") == 0) + flag |= FLAG_IGNORE; + *p = 0; + } + + cv->mode = 0; + cv->flags = flag; + cv->mblen = NULL; + cv->flush = NULL; + cv->compat = NULL; + cv->codepage = name_to_codepage(name); + if (cv->codepage == 1200 || cv->codepage == 1201) { + cv->mbtowc = utf16_mbtowc; + cv->wctomb = utf16_wctomb; + if (_stricmp(name, "UTF-16") == 0 || + _stricmp(name, "UTF16") == 0 || + _stricmp(name, "UCS-2") == 0 || + _stricmp(name, "UCS2") == 0 || + _stricmp(name, "UCS-2-INTERNAL") == 0) + cv->flags |= FLAG_USE_BOM; + } else if (cv->codepage == 12000 || cv->codepage == 12001) { + cv->mbtowc = utf32_mbtowc; + cv->wctomb = utf32_wctomb; + if (_stricmp(name, "UTF-32") == 0 || + _stricmp(name, "UTF32") == 0 || + _stricmp(name, "UCS-4") == 0 || + _stricmp(name, "UCS4") == 0) + cv->flags |= FLAG_USE_BOM; + } else if (cv->codepage == 65001) { + cv->mbtowc = kernel_mbtowc; + cv->wctomb = kernel_wctomb; + cv->mblen = utf8_mblen; + } else if ((cv->codepage == 50220 || + cv->codepage == 50221 || + cv->codepage == 50222) && load_mlang()) { + cv->mbtowc = iso2022jp_mbtowc; + cv->wctomb = iso2022jp_wctomb; + cv->flush = iso2022jp_flush; + } else if (cv->codepage == 51932 && load_mlang()) { + cv->mbtowc = mlang_mbtowc; + cv->wctomb = mlang_wctomb; + cv->mblen = eucjp_mblen; + } else if (IsValidCodePage(cv->codepage) + && GetCPInfo(cv->codepage, &cpinfo) != 0) { + cv->mbtowc = kernel_mbtowc; + cv->wctomb = kernel_wctomb; + if (cpinfo.MaxCharSize == 1) + cv->mblen = sbcs_mblen; + else if (cpinfo.MaxCharSize == 2) + cv->mblen = dbcs_mblen; + else + cv->mblen = mbcs_mblen; + } else { + /* not supported */ + free(name); + errno = EINVAL; + return FALSE; + } + + if 
(use_compat) { + switch (cv->codepage) { + case 932: cv->compat = cp932_compat; break; + case 20932: cv->compat = cp20932_compat; break; + case 51932: cv->compat = cp51932_compat; break; + case 50220: + case 50221: + case 50222: cv->compat = cp5022x_compat; break; + } + } + + free(name); + + return TRUE; +} + +static int name_to_codepage(const char *name) { + int i; + + if (*name == '\0' || + strcmp(name, "char") == 0) + return GetACP(); + else if (strcmp(name, "wchar_t") == 0) + return 1200; + else if (_strnicmp(name, "cp", 2) == 0) + return atoi(name + 2); /* CP123 */ + else if ('0' <= name[0] && name[0] <= '9') + return atoi(name); /* 123 */ + else if (_strnicmp(name, "xx", 2) == 0) + return atoi(name + 2); /* XX123 for debug */ + + for (i = 0; codepage_alias[i].name != NULL; ++i) + if (_stricmp(name, codepage_alias[i].name) == 0) + return codepage_alias[i].codepage; + return -1; +} + +/* + * http://www.faqs.org/rfcs/rfc2781.html + */ +static uint utf16_to_ucs4(const ushort *wbuf) { + uint wc = wbuf[0]; + if (0xD800 <= wbuf[0] && wbuf[0] <= 0xDBFF) + wc = ((wbuf[0] & 0x3FF) << 10) + (wbuf[1] & 0x3FF) + 0x10000; + return wc; +} + +static void ucs4_to_utf16(uint wc, ushort *wbuf, int *wbufsize) { + if (wc < 0x10000) { + wbuf[0] = wc; + *wbufsize = 1; + } else { + wc -= 0x10000; + wbuf[0] = 0xD800 | ((wc >> 10) & 0x3FF); + wbuf[1] = 0xDC00 | (wc & 0x3FF); + *wbufsize = 2; + } +} + +/* + * Check if codepage is one of those for which the dwFlags parameter + * to MultiByteToWideChar() must be zero. Return zero or + * MB_ERR_INVALID_CHARS. The docs in Platform SDK for Windows + * Server 2003 R2 claims that also codepage 65001 is one of these, but + * that doesn't seem to be the case. The MSDN docs for MSVS2008 leave + * out 65001 (UTF-8), and that indeed seems to be the case on XP, it + * works fine to pass MB_ERR_INVALID_CHARS in dwFlags when converting + * from UTF-8. 
+ */ +static int mbtowc_flags(int codepage) { + return (codepage == 50220 || codepage == 50221 || + codepage == 50222 || codepage == 50225 || + codepage == 50227 || codepage == 50229 || + codepage == 52936 || codepage == 54936 || + (codepage >= 57002 && codepage <= 57011) || + codepage == 65000 || codepage == 42) ? 0 : MB_ERR_INVALID_CHARS; +} + +/* + * Check if codepage is one those for which the lpUsedDefaultChar + * parameter to WideCharToMultiByte() must be NULL. The docs in + * Platform SDK for Windows Server 2003 R2 claims that this is the + * list below, while the MSDN docs for MSVS2008 claim that it is only + * for 65000 (UTF-7) and 65001 (UTF-8). This time the earlier Platform + * SDK seems to be correct, at least for XP. + */ +static int must_use_null_useddefaultchar(int codepage) { + return (codepage == 65000 || codepage == 65001 || + codepage == 50220 || codepage == 50221 || + codepage == 50222 || codepage == 50225 || + codepage == 50227 || codepage == 50229 || + codepage == 52936 || codepage == 54936 || + (codepage >= 57002 && codepage <= 57011) || + codepage == 42); +} + +static char * strrstr(const char *str, const char *token) { + int len = strlen(token); + const char *p = str + strlen(str); + + while (str <= --p) + if (p[0] == token[0] && strncmp(p, token, len) == 0) + return (char *)p; + return NULL; +} + +static char * xstrndup(const char *s, size_t n) { + char *p; + + p = (char *)malloc(n + 1); + if (p == NULL) + return NULL; + memcpy(p, s, n); + p[n] = '\0'; + return p; +} + +static int seterror(int err) { + errno = err; + return -1; +} + +#if defined(USE_LIBICONV_DLL) +static int libiconv_iconv_open( + rec_iconv_t *cd, + const char *tocode, + const char *fromcode +) { + HMODULE hlibiconv = NULL; + char *dllname; + const char *p; + const char *e; + f_iconv_open _iconv_open; + + /* + * always try to load dll, so that we can switch dll in runtime. 
+ */ + + /* XXX: getenv() can't get variable set by SetEnvironmentVariable() */ + p = getenv("WINICONV_LIBICONV_DLL"); + if (p == NULL) + p = DEFAULT_LIBICONV_DLL; + /* parse comma separated value */ + for ( ; *p != 0; p = (*e == ',') ? e + 1 : e) { + e = strchr(p, ','); + if (p == e) + continue; + else if (e == NULL) + e = p + strlen(p); + dllname = xstrndup(p, e - p); + if (dllname == NULL) + return FALSE; + hlibiconv = LoadLibraryA(dllname); + free(dllname); + if (hlibiconv != NULL) { + if (hlibiconv == hwiniconv) { + FreeLibrary(hlibiconv); + hlibiconv = NULL; + continue; + } + break; + } + } + + if (hlibiconv == NULL) + goto failed; + + _iconv_open = (f_iconv_open)GetProcAddressA( + hlibiconv, + "libiconv_open"); + if (_iconv_open == NULL) + _iconv_open = (f_iconv_open)GetProcAddressA( + hlibiconv, + "iconv_open"); + cd->iconv_close = (f_iconv_close)GetProcAddressA( + hlibiconv, + "libiconv_close"); + if (cd->iconv_close == NULL) + cd->iconv_close = (f_iconv_close)GetProcAddressA( + hlibiconv, + "iconv_close"); + cd->iconv = (f_iconv)GetProcAddressA( + hlibiconv, + "libiconv"); + if (cd->iconv == NULL) + cd->iconv = (f_iconv)GetProcAddressA( + hlibiconv, + "iconv"); + cd->_errno = (f_errno)find_imported_function( + hlibiconv, + "_errno"); + if (_iconv_open == NULL || cd->iconv_close == NULL + || cd->iconv == NULL || cd->_errno == NULL) + goto failed; + + cd->cd = _iconv_open(tocode, fromcode); + if (cd->cd == (iconv_t)(-1)) + goto failed; + + cd->hlibiconv = hlibiconv; + return TRUE; + +failed: + if (hlibiconv != NULL) + FreeLibrary(hlibiconv); + return FALSE; +} + +/* + * Reference: + * http://forums.belution.com/ja/vc/000/234/78s.shtml + * http://nienie.com/~masapico/api_ImageDirectoryEntryToData.html + * + * The formal way is + * imagehlp.h or dbghelp.h + * imagehlp.lib or dbghelp.lib + * ImageDirectoryEntryToData() + */ +#define TO_DOS_HEADER(base) ((PIMAGE_DOS_HEADER)(base)) +#define TO_NT_HEADERS(base) \ +((PIMAGE_NT_HEADERS)((LPBYTE)(base) + 
TO_DOS_HEADER(base)->e_lfanew)) +static PVOID MyImageDirectoryEntryToData( + LPVOID Base, + BOOLEAN MappedAsImage, + USHORT DirectoryEntry, + PULONG Size +) { + /* TODO: MappedAsImage? */ + PIMAGE_DATA_DIRECTORY p; + p = TO_NT_HEADERS(Base)->OptionalHeader.DataDirectory + DirectoryEntry; + if (p->VirtualAddress == 0) { + *Size = 0; + return NULL; + } + *Size = p->Size; + return (PVOID)((LPBYTE)Base + p->VirtualAddress); +} + +static FARPROC find_imported_function( + HMODULE hModule, + const char *funcname +) { + DWORD_PTR Base; + ULONG Size; + PIMAGE_IMPORT_DESCRIPTOR Imp; + PIMAGE_THUNK_DATA Address; /* Import Address Table */ + PIMAGE_THUNK_DATA Name; /* Import Name Table */ + PIMAGE_IMPORT_BY_NAME ImpName; + + Base = (DWORD_PTR)hModule; + Imp = (PIMAGE_IMPORT_DESCRIPTOR)MyImageDirectoryEntryToData( + (LPVOID)Base, + TRUE, + IMAGE_DIRECTORY_ENTRY_IMPORT, + &Size); + if (Imp == NULL) + return NULL; + for ( ; Imp->OriginalFirstThunk != 0; ++Imp) { + Address = (PIMAGE_THUNK_DATA)(Base + Imp->FirstThunk); + Name = (PIMAGE_THUNK_DATA)(Base + Imp->OriginalFirstThunk); + for ( ; Name->u1.Ordinal != 0; ++Name, ++Address) { + if (!IMAGE_SNAP_BY_ORDINAL(Name->u1.Ordinal)) { + ImpName = (PIMAGE_IMPORT_BY_NAME) + (Base + (DWORD_PTR)Name->u1.AddressOfData); + if (strcmp((char *)ImpName->Name, funcname) == 0) + return (FARPROC)Address->u1.Function; + } + } + } + return NULL; +} +#endif + +static int sbcs_mblen( + csconv_t *cv UNUSED, + const uchar *buf UNUSED, + int bufsize UNUSED +) { + return 1; +} + +static int dbcs_mblen( + csconv_t *cv, + const uchar *buf, + int bufsize +) { + int len = IsDBCSLeadByteEx(cv->codepage, buf[0]) ? 
2 : 1; + if (bufsize < len) + return seterror(EINVAL); + return len; +} + +static int mbcs_mblen( + csconv_t *cv, + const uchar *buf, + int bufsize +) { + int len = 0; + + if (cv->codepage == 54936) { + if (buf[0] <= 0x7F) len = 1; + else if (buf[0] >= 0x81 && buf[0] <= 0xFE && + bufsize >= 2 && + ((buf[1] >= 0x40 && buf[1] <= 0x7E) || + (buf[1] >= 0x80 && buf[1] <= 0xFE))) len = 2; + else if (buf[0] >= 0x81 && buf[0] <= 0xFE && + bufsize >= 4 && + buf[1] >= 0x30 && buf[1] <= 0x39) len = 4; + else + return seterror(EINVAL); + return len; + } else { + return seterror(EINVAL); + } +} + +static int utf8_mblen( + csconv_t *cv UNUSED, + const uchar *buf, + int bufsize +) { + int len = 0; + + if (buf[0] < 0x80) len = 1; + else if ((buf[0] & 0xE0) == 0xC0) len = 2; + else if ((buf[0] & 0xF0) == 0xE0) len = 3; + else if ((buf[0] & 0xF8) == 0xF0) len = 4; + else if ((buf[0] & 0xFC) == 0xF8) len = 5; + else if ((buf[0] & 0xFE) == 0xFC) len = 6; + + if (len == 0) + return seterror(EILSEQ); + else if (bufsize < len) + return seterror(EINVAL); + return len; +} + +static int eucjp_mblen( + csconv_t *cv UNUSED, + const uchar *buf, + int bufsize +) { + if (buf[0] < 0x80) { /* ASCII */ + return 1; + } else if (buf[0] == 0x8E) { /* JIS X 0201 */ + if (bufsize < 2) + return seterror(EINVAL); + else if (!(0xA1 <= buf[1] && buf[1] <= 0xDF)) + return seterror(EILSEQ); + return 2; + } else if (buf[0] == 0x8F) { /* JIS X 0212 */ + if (bufsize < 3) + return seterror(EINVAL); + else if (!(0xA1 <= buf[1] && buf[1] <= 0xFE) + || !(0xA1 <= buf[2] && buf[2] <= 0xFE)) + return seterror(EILSEQ); + return 3; + } else { /* JIS X 0208 */ + if (bufsize < 2) + return seterror(EINVAL); + else if (!(0xA1 <= buf[0] && buf[0] <= 0xFE) + || !(0xA1 <= buf[1] && buf[1] <= 0xFE)) + return seterror(EILSEQ); + return 2; + } +} + +static int kernel_mbtowc( + csconv_t *cv, + const uchar *buf, + int bufsize, + ushort *wbuf, + int *wbufsize +) { + int len; + + len = cv->mblen(cv, buf, bufsize); + if (len == -1) + 
return -1; + /* If converting from ASCII, reject 8bit + * chars. MultiByteToWideChar() doesn't. Note that for ASCII we + * know that the mblen function is sbcs_mblen() so len is 1. + */ + if (cv->codepage == 20127 && buf[0] >= 0x80) + return seterror(EILSEQ); + *wbufsize = MultiByteToWideChar( + cv->codepage, + mbtowc_flags(cv->codepage), + (const char *)buf, + len, + (wchar_t *)wbuf, *wbufsize); + if (*wbufsize == 0) + return seterror(EILSEQ); + return len; +} + +static int kernel_wctomb( + csconv_t *cv, + ushort *wbuf, + int wbufsize, + uchar *buf, + int bufsize +) { + BOOL usedDefaultChar = 0; + BOOL *p = NULL; + int flags = 0; + int len; + + if (bufsize == 0) + return seterror(E2BIG); + if (!must_use_null_useddefaultchar(cv->codepage)) { + p = &usedDefaultChar; +#ifdef WC_NO_BEST_FIT_CHARS + if (!(cv->flags & FLAG_TRANSLIT)) + flags |= WC_NO_BEST_FIT_CHARS; +#endif + } + len = WideCharToMultiByte(cv->codepage, flags, + (const wchar_t *)wbuf, wbufsize, (char *)buf, bufsize, NULL, p); + if (len == 0) { + if (GetLastError() == ERROR_INSUFFICIENT_BUFFER) + return seterror(E2BIG); + return seterror(EILSEQ); + } else if (usedDefaultChar && !(cv->flags & FLAG_TRANSLIT)) { + return seterror(EILSEQ); + } else if (cv->mblen(cv, buf, len) != len) { /* validate result */ + return seterror(EILSEQ); + } + return len; +} + +/* + * It seems that the mode (cv->mode) is fixnum. + * For example, when converting iso-2022-jp(cp50221) to unicode: + * in ascii sequence: mode=0xC42C0000 + * in jisx0208 sequence: mode=0xC42C0001 + * "C42C" is same for each convert session. 
+ * It should be: ((codepage-1)<<16)|state + */ +static int mlang_mbtowc( + csconv_t *cv, + const uchar *buf, + int bufsize, + ushort *wbuf, + int *wbufsize +) { + int len; + int insize; + HRESULT hr; + + len = cv->mblen(cv, buf, bufsize); + if (len == -1) + return -1; + insize = len; + hr = ConvertINetMultiByteToUnicode(&cv->mode, cv->codepage, + (const char *)buf, &insize, (wchar_t *)wbuf, wbufsize); + if (hr != S_OK || insize != len) + return seterror(EILSEQ); + return len; +} + +static int mlang_wctomb( + csconv_t *cv, + ushort *wbuf, + int wbufsize, + uchar *buf, + int bufsize) { + char tmpbuf[MB_CHAR_MAX]; /* enough room for one character */ + int tmpsize = MB_CHAR_MAX; + int insize = wbufsize; + HRESULT hr; + + hr = ConvertINetUnicodeToMultiByte(&cv->mode, cv->codepage, + (const wchar_t *)wbuf, &wbufsize, tmpbuf, &tmpsize); + if (hr != S_OK || insize != wbufsize) + return seterror(EILSEQ); + else if (bufsize < tmpsize) + return seterror(E2BIG); + else if (cv->mblen(cv, (uchar *)tmpbuf, tmpsize) != tmpsize) + return seterror(EILSEQ); + memcpy(buf, tmpbuf, tmpsize); + return tmpsize; +} + +static int utf16_mbtowc( + csconv_t *cv, + const uchar *buf, + int bufsize, + ushort *wbuf, + int *wbufsize +) { + int codepage = cv->codepage; + + /* swap endian: 1200 <-> 1201 */ + if (cv->mode & UNICODE_MODE_SWAPPED) + codepage ^= 1; + + if (bufsize < 2) + return seterror(EINVAL); + if (codepage == 1200) /* little endian */ + wbuf[0] = (buf[1] << 8) | buf[0]; + else if (codepage == 1201) /* big endian */ + wbuf[0] = (buf[0] << 8) | buf[1]; + + if ((cv->flags & FLAG_USE_BOM) && + !(cv->mode & UNICODE_MODE_BOM_DONE)) { + cv->mode |= UNICODE_MODE_BOM_DONE; + if (wbuf[0] == 0xFFFE) { + cv->mode |= UNICODE_MODE_SWAPPED; + *wbufsize = 0; + return 2; + } else if (wbuf[0] == 0xFEFF) { + *wbufsize = 0; + return 2; + } + } + + if (0xDC00 <= wbuf[0] && wbuf[0] <= 0xDFFF) + return seterror(EILSEQ); + if (0xD800 <= wbuf[0] && wbuf[0] <= 0xDBFF) { + if (bufsize < 4) + return 
seterror(EINVAL); + if (codepage == 1200) /* little endian */ + wbuf[1] = (buf[3] << 8) | buf[2]; + else if (codepage == 1201) /* big endian */ + wbuf[1] = (buf[2] << 8) | buf[3]; + if (!(0xDC00 <= wbuf[1] && wbuf[1] <= 0xDFFF)) + return seterror(EILSEQ); + *wbufsize = 2; + return 4; + } + *wbufsize = 1; + return 2; +} + +static int utf16_wctomb( + csconv_t *cv, + ushort *wbuf, + int wbufsize, + uchar *buf, + int bufsize +) { + if ((cv->flags & FLAG_USE_BOM) && + !(cv->mode & UNICODE_MODE_BOM_DONE)) { + int r; + + cv->mode |= UNICODE_MODE_BOM_DONE; + if (bufsize < 2) + return seterror(E2BIG); + if (cv->codepage == 1200) /* little endian */ + memcpy(buf, "\xFF\xFE", 2); + else if (cv->codepage == 1201) /* big endian */ + memcpy(buf, "\xFE\xFF", 2); + + r = utf16_wctomb(cv, wbuf, wbufsize, buf + 2, bufsize - 2); + if (r == -1) + return -1; + return r + 2; + } + + if (bufsize < 2) + return seterror(E2BIG); + if (cv->codepage == 1200) { /* little endian */ + buf[0] = (wbuf[0] & 0x00FF); + buf[1] = (wbuf[0] & 0xFF00) >> 8; + } else if (cv->codepage == 1201) { /* big endian */ + buf[0] = (wbuf[0] & 0xFF00) >> 8; + buf[1] = (wbuf[0] & 0x00FF); + } + if (0xD800 <= wbuf[0] && wbuf[0] <= 0xDBFF) { + if (bufsize < 4) + return seterror(E2BIG); + if (cv->codepage == 1200) { /* little endian */ + buf[2] = (wbuf[1] & 0x00FF); + buf[3] = (wbuf[1] & 0xFF00) >> 8; + } else if (cv->codepage == 1201) { /* big endian */ + buf[2] = (wbuf[1] & 0xFF00) >> 8; + buf[3] = (wbuf[1] & 0x00FF); + } + return 4; + } + return 2; +} + +static int utf32_mbtowc( + csconv_t *cv, + const uchar *buf, + int bufsize, + ushort *wbuf, + int *wbufsize +) { + int codepage = cv->codepage; + uint wc = 0xD800; + + /* swap endian: 12000 <-> 12001 */ + if (cv->mode & UNICODE_MODE_SWAPPED) + codepage ^= 1; + + if (bufsize < 4) + return seterror(EINVAL); + if (codepage == 12000) /* little endian */ + wc = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; + else if (codepage == 12001) /* big endian */ + wc = 
(buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]; + + if ((cv->flags & FLAG_USE_BOM) && !(cv->mode & UNICODE_MODE_BOM_DONE)) { + cv->mode |= UNICODE_MODE_BOM_DONE; + if (wc == 0xFFFE0000) { + cv->mode |= UNICODE_MODE_SWAPPED; + *wbufsize = 0; + return 4; + } else if (wc == 0x0000FEFF) { + *wbufsize = 0; + return 4; + } + } + + if ((0xD800 <= wc && wc <= 0xDFFF) || 0x10FFFF < wc) + return seterror(EILSEQ); + ucs4_to_utf16(wc, wbuf, wbufsize); + return 4; +} + +static int utf32_wctomb( + csconv_t *cv, + ushort *wbuf, + int wbufsize, + uchar *buf, + int bufsize +) { + uint wc; + + if ((cv->flags & FLAG_USE_BOM) && !(cv->mode & UNICODE_MODE_BOM_DONE)) { + int r; + + cv->mode |= UNICODE_MODE_BOM_DONE; + if (bufsize < 4) + return seterror(E2BIG); + if (cv->codepage == 12000) /* little endian */ + memcpy(buf, "\xFF\xFE\x00\x00", 4); + else if (cv->codepage == 12001) /* big endian */ + memcpy(buf, "\x00\x00\xFE\xFF", 4); + + r = utf32_wctomb(cv, wbuf, wbufsize, buf + 4, bufsize - 4); + if (r == -1) + return -1; + return r + 4; + } + + if (bufsize < 4) + return seterror(E2BIG); + wc = utf16_to_ucs4(wbuf); + if (cv->codepage == 12000) { /* little endian */ + buf[0] = wc & 0x000000FF; + buf[1] = (wc & 0x0000FF00) >> 8; + buf[2] = (wc & 0x00FF0000) >> 16; + buf[3] = (wc & 0xFF000000) >> 24; + } else if (cv->codepage == 12001) { /* big endian */ + buf[0] = (wc & 0xFF000000) >> 24; + buf[1] = (wc & 0x00FF0000) >> 16; + buf[2] = (wc & 0x0000FF00) >> 8; + buf[3] = wc & 0x000000FF; + } + return 4; +} + +/* + * 50220: ISO 2022 Japanese with no halfwidth Katakana; Japanese (JIS) + * 50221: ISO 2022 Japanese with halfwidth Katakana; Japanese (JIS-Allow + * 1 byte Kana) + * 50222: ISO 2022 Japanese JIS X 0201-1989; Japanese (JIS-Allow 1 byte + * Kana - SO/SI) + * + * MultiByteToWideChar() and WideCharToMultiByte() behave differently + * depending on Windows version. On XP, WideCharToMultiByte() doesn't + * terminate result sequence with ascii escape. But Vista does. 
+ * Use MLang instead. + */ + +#define ISO2022_MODE(cs, shift) (((cs) << 8) | (shift)) +#define ISO2022_MODE_CS(mode) (((mode) >> 8) & 0xFF) +#define ISO2022_MODE_SHIFT(mode) ((mode) & 0xFF) + +#define ISO2022_SI 0 +#define ISO2022_SO 1 + +/* shift in */ +static const char iso2022_SI_seq[] = "\x0F"; +/* shift out */ +static const char iso2022_SO_seq[] = "\x0E"; + +typedef struct iso2022_esc_t iso2022_esc_t; +struct iso2022_esc_t { + const char *esc; + int esc_len; + int len; + int cs; +}; + +#define ISO2022JP_CS_ASCII 0 +#define ISO2022JP_CS_JISX0201_ROMAN 1 +#define ISO2022JP_CS_JISX0201_KANA 2 +#define ISO2022JP_CS_JISX0208_1978 3 +#define ISO2022JP_CS_JISX0208_1983 4 +#define ISO2022JP_CS_JISX0212 5 + +static iso2022_esc_t iso2022jp_esc[] = { + {"\x1B\x28\x42", 3, 1, ISO2022JP_CS_ASCII}, + {"\x1B\x28\x4A", 3, 1, ISO2022JP_CS_JISX0201_ROMAN}, + {"\x1B\x28\x49", 3, 1, ISO2022JP_CS_JISX0201_KANA}, + /* unify 1978 with 1983 */ + {"\x1B\x24\x40", 3, 2, ISO2022JP_CS_JISX0208_1983}, + {"\x1B\x24\x42", 3, 2, ISO2022JP_CS_JISX0208_1983}, + {"\x1B\x24\x28\x44", 4, 2, ISO2022JP_CS_JISX0212}, + {NULL, 0, 0, 0} +}; + +static int iso2022jp_mbtowc( + csconv_t *cv, + const uchar *buf, + int bufsize, + ushort *wbuf, + int *wbufsize +) { + iso2022_esc_t *iesc = iso2022jp_esc; + char tmp[MB_CHAR_MAX]; + int insize; + HRESULT hr; + DWORD dummy = 0; + int len; + int esc_len; + int cs; + int shift; + int i; + + if (buf[0] == 0x1B) { + for (i = 0; iesc[i].esc != NULL; ++i) { + esc_len = iesc[i].esc_len; + if (bufsize < esc_len) { + if (strncmp((char *)buf, iesc[i].esc, bufsize) == 0) + return seterror(EINVAL); + } else { + if (strncmp((char *)buf, iesc[i].esc, esc_len) == 0) { + cv->mode = ISO2022_MODE(iesc[i].cs, ISO2022_SI); + *wbufsize = 0; + return esc_len; + } + } + } + /* not supported escape sequence */ + return seterror(EILSEQ); + } else if (buf[0] == iso2022_SO_seq[0]) { + cv->mode = ISO2022_MODE(ISO2022_MODE_CS(cv->mode), ISO2022_SO); + *wbufsize = 0; + return 1; + } else if 
(buf[0] == iso2022_SI_seq[0]) { + cv->mode = ISO2022_MODE(ISO2022_MODE_CS(cv->mode), ISO2022_SI); + *wbufsize = 0; + return 1; + } + + cs = ISO2022_MODE_CS(cv->mode); + shift = ISO2022_MODE_SHIFT(cv->mode); + + /* reset the mode for informal sequence */ + if (buf[0] < 0x20) { + cs = ISO2022JP_CS_ASCII; + shift = ISO2022_SI; + } + + len = iesc[cs].len; + if (bufsize < len) + return seterror(EINVAL); + for (i = 0; i < len; ++i) + if (!(buf[i] < 0x80)) + return seterror(EILSEQ); + esc_len = iesc[cs].esc_len; + memcpy(tmp, iesc[cs].esc, esc_len); + if (shift == ISO2022_SO) { + memcpy(tmp + esc_len, iso2022_SO_seq, 1); + esc_len += 1; + } + memcpy(tmp + esc_len, buf, len); + + if ((cv->codepage == 50220 || cv->codepage == 50221 + || cv->codepage == 50222) && shift == ISO2022_SO) { + /* XXX: shift-out cannot be used for mbtowc (both kernel and + * mlang) */ + esc_len = iesc[ISO2022JP_CS_JISX0201_KANA].esc_len; + memcpy(tmp, iesc[ISO2022JP_CS_JISX0201_KANA].esc, esc_len); + memcpy(tmp + esc_len, buf, len); + } + + insize = len + esc_len; + hr = ConvertINetMultiByteToUnicode(&dummy, cv->codepage, + (const char *)tmp, &insize, (wchar_t *)wbuf, wbufsize); + if (hr != S_OK || insize != len + esc_len) + return seterror(EILSEQ); + + /* Check for conversion error. Assuming defaultChar is 0x3F. 
*/ + /* ascii should be converted from ascii */ + if (wbuf[0] == buf[0] + && cv->mode != ISO2022_MODE(ISO2022JP_CS_ASCII, ISO2022_SI)) + return seterror(EILSEQ); + + /* reset the mode for informal sequence */ + if (cv->mode != ISO2022_MODE(cs, shift)) + cv->mode = ISO2022_MODE(cs, shift); + + return len; +} + +static int iso2022jp_wctomb( + csconv_t *cv, + ushort *wbuf, + int wbufsize, + uchar *buf, + int bufsize +) { + iso2022_esc_t *iesc = iso2022jp_esc; + char tmp[MB_CHAR_MAX]; + int tmpsize = MB_CHAR_MAX; + int insize = wbufsize; + HRESULT hr; + DWORD dummy = 0; + int len; + int esc_len; + int cs; + int shift; + int i; + + /* + * MultiByte = [escape sequence] + character + [escape sequence] + * + * Whether trailing escape sequence is added depends on which API is + * used (kernel or MLang, and its version). + */ + hr = ConvertINetUnicodeToMultiByte(&dummy, cv->codepage, + (const wchar_t *)wbuf, &wbufsize, tmp, &tmpsize); + if (hr != S_OK || insize != wbufsize) + return seterror(EILSEQ); + else if (bufsize < tmpsize) + return seterror(E2BIG); + + if (tmpsize == 1) { + cs = ISO2022JP_CS_ASCII; + esc_len = 0; + } else { + for (i = 1; iesc[i].esc != NULL; ++i) { + esc_len = iesc[i].esc_len; + if (strncmp(tmp, iesc[i].esc, esc_len) == 0) { + cs = iesc[i].cs; + break; + } + } + if (iesc[i].esc == NULL) + /* not supported escape sequence */ + return seterror(EILSEQ); + } + + shift = ISO2022_SI; + if (tmp[esc_len] == iso2022_SO_seq[0]) { + shift = ISO2022_SO; + esc_len += 1; + } + + len = iesc[cs].len; + + /* Check for converting error. Assuming defaultChar is 0x3F. 
*/ + /* ascii should be converted from ascii */ + if (cs == ISO2022JP_CS_ASCII && !(wbuf[0] < 0x80)) + return seterror(EILSEQ); + else if (tmpsize < esc_len + len) + return seterror(EILSEQ); + + if (cv->mode == ISO2022_MODE(cs, shift)) { + /* remove escape sequence */ + if (esc_len != 0) + memmove(tmp, tmp + esc_len, len); + esc_len = 0; + } else { + if (cs == ISO2022JP_CS_ASCII) { + esc_len = iesc[ISO2022JP_CS_ASCII].esc_len; + memmove(tmp + esc_len, tmp, len); + memcpy(tmp, iesc[ISO2022JP_CS_ASCII].esc, esc_len); + } + if (ISO2022_MODE_SHIFT(cv->mode) == ISO2022_SO) { + /* shift-in before changing to other mode */ + memmove(tmp + 1, tmp, len + esc_len); + memcpy(tmp, iso2022_SI_seq, 1); + esc_len += 1; + } + } + + if (bufsize < len + esc_len) + return seterror(E2BIG); + memcpy(buf, tmp, len + esc_len); + cv->mode = ISO2022_MODE(cs, shift); + return len + esc_len; +} + +static int iso2022jp_flush( + csconv_t *cv, + uchar *buf, + int bufsize +) { + iso2022_esc_t *iesc = iso2022jp_esc; + int esc_len; + + if (cv->mode != ISO2022_MODE(ISO2022JP_CS_ASCII, ISO2022_SI)) { + esc_len = 0; + if (ISO2022_MODE_SHIFT(cv->mode) != ISO2022_SI) + esc_len += 1; + if (ISO2022_MODE_CS(cv->mode) != ISO2022JP_CS_ASCII) + esc_len += iesc[ISO2022JP_CS_ASCII].esc_len; + if (bufsize < esc_len) + return seterror(E2BIG); + + esc_len = 0; + if (ISO2022_MODE_SHIFT(cv->mode) != ISO2022_SI) { + memcpy(buf, iso2022_SI_seq, 1); + esc_len += 1; + } + if (ISO2022_MODE_CS(cv->mode) != ISO2022JP_CS_ASCII) { + memcpy(buf + esc_len, iesc[ISO2022JP_CS_ASCII].esc, + iesc[ISO2022JP_CS_ASCII].esc_len); + esc_len += iesc[ISO2022JP_CS_ASCII].esc_len; + } + return esc_len; + } + return 0; +} + +#if defined(MAKE_DLL) && defined(USE_LIBICONV_DLL) +BOOL WINAPI DllMain( + HINSTANCE hinstDLL, + DWORD fdwReason, + LPVOID lpReserved +) { + switch ( fdwReason ) { + case DLL_PROCESS_ATTACH: + hwiniconv = (HMODULE)hinstDLL; + break; + case DLL_THREAD_ATTACH: + case DLL_THREAD_DETACH: + case DLL_PROCESS_DETACH: + break; 
+ } + return TRUE; +} +#endif + +#if defined(MAKE_EXE) +#include <stdio.h> +#include <fcntl.h> +#include <io.h> +int main(int argc, char **argv) { + char *fromcode = NULL; + char *tocode = NULL; + int i; + char inbuf[BUFSIZ]; + char outbuf[BUFSIZ]; + const char *pin; + char *pout; + size_t inbytesleft; + size_t outbytesleft; + size_t rest = 0; + iconv_t cd; + size_t r; + FILE *in = stdin; + FILE *out = stdout; + int ignore = 0; + char *p; + + _setmode(_fileno(stdin), _O_BINARY); + _setmode(_fileno(stdout), _O_BINARY); + + for (i = 1; i < argc; ++i) { + if (strcmp(argv[i], "-l") == 0) { + for (i = 0; codepage_alias[i].name != NULL; ++i) + printf("%s\n", codepage_alias[i].name); + return 0; + } + + if (strcmp(argv[i], "-f") == 0) { + fromcode = argv[++i]; + } else if (strcmp(argv[i], "-t") == 0) { + tocode = argv[++i]; + } else if (strcmp(argv[i], "-c") == 0) { + ignore = 1; + } else if (strcmp(argv[i], "--output") == 0) { + out = fopen(argv[++i], "wb"); + if (out == NULL) { + fprintf(stderr, "cannot open %s\n", argv[i]); + return 1; + } + } else { + in = fopen(argv[i], "rb"); + if (in == NULL) { + fprintf(stderr, "cannot open %s\n", argv[i]); + return 1; + } + break; + } + } + + if (fromcode == NULL || tocode == NULL) { + printf("usage: %s [-c] -f from-enc -t to-enc [file]\n", argv[0]); + return 0; + } + + if (ignore) { + p = tocode; + tocode = (char *)malloc(strlen(p) + strlen("//IGNORE") + 1); + if (tocode == NULL) { + perror("fatal error"); + return 1; + } + strcpy(tocode, p); //NOLINT + strcat(tocode, "//IGNORE"); //NOLINT + } + + cd = iconv_open(tocode, fromcode); + if (cd == (iconv_t)(-1)) { + perror("iconv_open error"); + return 1; + } + + while ((inbytesleft = fread( + inbuf + rest, 1, + sizeof(inbuf) - rest, in)) != 0 + || rest != 0) { + inbytesleft += rest; + pin = inbuf; + pout = outbuf; + outbytesleft = sizeof(outbuf); + r = iconv(cd, &pin, &inbytesleft, &pout, &outbytesleft); + fwrite(outbuf, 1, sizeof(outbuf) - outbytesleft, out); + if (r == 
(size_t)(-1) && + errno != E2BIG && + (errno != EINVAL || feof(in))) { + perror("conversion error"); + return 1; + } + memmove(inbuf, pin, inbytesleft); + rest = inbytesleft; + } + pout = outbuf; + outbytesleft = sizeof(outbuf); + r = iconv(cd, NULL, NULL, &pout, &outbytesleft); + fwrite(outbuf, 1, sizeof(outbuf) - outbytesleft, out); + if (r == (size_t)(-1)) { + perror("conversion error"); + return 1; + } + + iconv_close(cd); + + return 0; +} +#endif diff --git a/pandas/_libs/src/librdata/win_iconv.h b/pandas/_libs/src/librdata/win_iconv.h new file mode 100644 index 0000000000000..da6e9fa4ab96a --- /dev/null +++ b/pandas/_libs/src/librdata/win_iconv.h @@ -0,0 +1,48 @@ +/* + +win-iconv - iconv implementation using Win32 API to convert. +Written in 2009-2016 by Yukihiro Nakadaira <https://github.com/ynkdir> +and contributors to win-iconv <https://github.com/win-iconv/win-iconv> + +To the extent possible under law, the author(s) have dedicated all copyright +and related and neighboring rights to this software to the public domain +worldwide. This software is distributed without any warranty. + +You should have received a copy of the CC0 Public Domain Dedication along with +this software. If not, see http://creativecommons.org/publicdomain/zero/1.0/. 
+ + */ +#ifndef PANDAS__LIBS_SRC_LIBRDATA_WIN_ICONV_H_ +#define PANDAS__LIBS_SRC_LIBRDATA_WIN_ICONV_H_ + +// #ifndef _LIBICONV_H + #define _LIBICONV_H + #include <stddef.h> + #ifndef WINICONV_CONST + # ifdef ICONV_CONST + # define WINICONV_CONST ICONV_CONST + # else + # define WINICONV_CONST const + # endif + #endif + #ifdef __cplusplus + extern "C" { + #endif + + typedef void* iconv_t; + iconv_t iconv_open(const char *tocode, const char *fromcode); + int iconv_close(iconv_t cd); + size_t iconv( + iconv_t cd, + WINICONV_CONST char **inbuf, + size_t *inbytesleft, + char **outbuf, + size_t *outbytesleft); + + #ifdef __cplusplus + } + #endif +// #endif + +#endif // PANDAS__LIBS_SRC_LIBRDATA_WIN_ICONV_H_ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6cc12ccfba22e..82caa844b2751 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2336,6 +2336,136 @@ def _from_arrays( ) return cls(mgr) + @doc(storage_options=generic._shared_docs["storage_options"]) + def to_rdata( + self, + path_or_buffer: FilePathOrBuffer, + file_format: str = "infer", + rda_name: str = "pandas_dataframe", + index: bool = True, + compression: CompressionOptions = "gzip", + storage_options: StorageOptions = None, + ) -> None: + """ + Render one or more DataFrames to R data (.RData, .rda, .rds). + + .. versionadded:: 1.3.0 + + Parameters + ---------- + path_or_buffer : a valid str, path object or file-like object + Any valid string path is acceptable. + + file_format : {{'infer', 'rda', 'rdata', 'rds'}}, default 'infer' + R serialization type generated from native commands: base::save + (that saves multiple objects) or base::saveRDS (that saves a + single object to disk). Default 'infer' will use extension in file + name to determine the format type. + + rda_name : str, default "pandas_dataframe" + Name for R data.frame in RData/rda file. + + index : bool, default True + Include index or MultiIndex in output as separate columns.
Since + DataFrame indexes can include multiple columns and R rownames can + only include one column, DataFrame index will not map to R + data.frame rownames. + + compression : {{'gzip', 'bz2', 'xz', None}}, default 'gzip' + Compression type for on-the-fly decompression of on-disk data. + + {storage_options} + + Raises + ------ + LibrdataWriterError + * If DataFrame types or values do not conform to R data types. + + See Also + -------- + to_stata : Convert DataFrame to a Stata dataset. + + Notes + ----- + For more information of R serialization data types, see docs on + rda_ and rds_ formats. + + .. _rda: https://www.rdocumentation.org/packages/base/versions/3.6.2/\ +topics/save + .. _rds: https://www.rdocumentation.org/packages/base/versions/3.6.2/\ +topics/readRDS + + Examples + -------- + To save an .rds file which only contains a single DataFrame: + + >>> ghg_df = pd.DataFrame( + ... {{'gas': ['Carbon dioxide', 'Methane', + ... 'Nitrous oxide', + ... 'Fluorinated gases', + ... 'Total'], + ... 'year': [2018, 2018, 2018, 2018, 2018], + ... 'emissions': [5424.88, 634.46, 434.53, + ... 182.78, 6676.65] + ... }}) + >>> ghg_df.to_rdata("ghg_df.rds") # doctest: +SKIP + + >>> R_code = ''' + ... ghg_df <- readRDS("ghg_df.rds") + ... ghg_df + ... index gas year emissions + ... 1 0 Carbon dioxide 2018 5424.88 + ... 2 1 Methane 2018 634.46 + ... 3 2 Nitrous oxide 2018 434.53 + ... 4 3 Fluorinated gases 2018 182.78 + ... 5 4 Total 2018 6676.65 + ... ''' + + To save an .RData or .rda file: + + >>> plants_df = pd.DataFrame( + ... {{'plant_group': ['Pteridophytes', + ... 'Pteridophytes', + ... 'Pteridophytes', + ... 'Pteridophytes', + ... 'Pteridophytes'], + ... 'status': ['Data Deficient', + ... 'Extinct', + ... 'Not Threatened', + ... 'Possibly Threatened', + ... 'Threatened'], + ... 'count': [398, 65, 1294, 408, 1275] + ... }}) + >>> plants_df.to_rdata( + ... "plants_df.rda", + ... rda_name="plants_df", + ... ) # doctest: +SKIP + + >>> R_code = ''' + ... 
load("plants_df.rda") + ... + ... mget(ls()) + ... $plants_df + ... index plant_group status count + ... 1 0 Pteridophytes Data Deficient 398 + ... 2 1 Pteridophytes Extinct 65 + ... 3 2 Pteridophytes Not Threatened 1294 + ... 4 3 Pteridophytes Possibly Threatened 408 + ... 5 4 Pteridophytes Threatened 1275 + ... ''' + """ + from pandas.io.rdata.rdata_writer import RDataWriter + + RDataWriter( + self, + path_or_buffer=path_or_buffer, + file_format=file_format, + rda_name=rda_name, + index=index, + compression=compression, + storage_options=storage_options, + ).write_data() + @doc(storage_options=generic._shared_docs["storage_options"]) @deprecate_kwarg(old_arg_name="fname", new_arg_name="path") def to_stata( diff --git a/pandas/io/api.py b/pandas/io/api.py index 5926f2166ee9d..9cacb014e7dd0 100644 --- a/pandas/io/api.py +++ b/pandas/io/api.py @@ -29,6 +29,7 @@ HDFStore, read_hdf, ) +from pandas.io.rdata import read_rdata from pandas.io.sas import read_sas from pandas.io.spss import read_spss from pandas.io.sql import ( diff --git a/pandas/io/rdata/__init__.py b/pandas/io/rdata/__init__.py new file mode 100644 index 0000000000000..41ebfa536ec3b --- /dev/null +++ b/pandas/io/rdata/__init__.py @@ -0,0 +1,3 @@ +from pandas.io.rdata.rdata_reader import read_rdata + +__all__ = ["read_rdata"] diff --git a/pandas/io/rdata/_rdata.pxd b/pandas/io/rdata/_rdata.pxd new file mode 100644 index 0000000000000..1ed11347d72be --- /dev/null +++ b/pandas/io/rdata/_rdata.pxd @@ -0,0 +1,280 @@ +# cython: c_string_type=str, c_string_encoding=utf8, language_level=3 + +from posix.types cimport off_t + +from libc.stdint cimport ( + int32_t, + int64_t, +) +from libc.time cimport ( + mktime, + time_t, + tm, +) + + +cdef extern from '../../_libs/src/librdata/rdata.h': + + ctypedef enum rdata_type_t: + RDATA_TYPE_STRING, + RDATA_TYPE_INT32, + RDATA_TYPE_REAL, + RDATA_TYPE_LOGICAL, + RDATA_TYPE_TIMESTAMP, + RDATA_TYPE_DATE + + ctypedef enum rdata_error_t: + RDATA_OK, + RDATA_ERROR_OPEN = 1, + 
RDATA_ERROR_SEEK, + RDATA_ERROR_READ, + RDATA_ERROR_MALLOC, + RDATA_ERROR_USER_ABORT, + RDATA_ERROR_PARSE, + RDATA_ERROR_WRITE, + RDATA_ERROR_FACTOR, + RDATA_ERROR_UNSUPPORTED_COMPRESSION, + RDATA_ERROR_UNSUPPORTED_CHARSET, + RDATA_ERROR_CONVERT, + RDATA_ERROR_CONVERT_BAD_STRING, + RDATA_ERROR_CONVERT_LONG_STRING, + RDATA_ERROR_CONVERT_SHORT_STRING, + RDATA_ERROR_UNSUPPORTED_S_EXPRESSION, + RDATA_ERROR_UNSUPPORTED_STORAGE_CLASS + + ctypedef enum rdata_file_format_t: + RDATA_WORKSPACE, + RDATA_SINGLE_OBJECT + + cdef const char *rdata_error_message(rdata_error_t error_code) + + ctypedef int (*rdata_column_handler)( + const char *name, rdata_type_t type, + void *data, long count, void *ctx + ) except * + ctypedef int ( + *rdata_table_handler)(const char *name, void *ctx + ) except * + ctypedef int ( + *rdata_text_value_handler)(const char *value, int index, void *ctx + ) except * + ctypedef int ( + *rdata_column_name_handler)(const char *value, int index, void *ctx + ) except * + ctypedef void (*rdata_error_handler)(const char *error_message, void *ctx) + ctypedef int (*rdata_progress_handler)(double progress, void *ctx) + + IF UNAME_SYSNAME == "AIX": + ctypedef off64_t rdata_off_t + ELSE: + ctypedef off_t rdata_off_t + + # Read API + + ctypedef enum rdata_io_flags_t: + RDATA_SEEK_SET, + RDATA_SEEK_CUR, + RDATA_SEEK_END + + ctypedef int (*rdata_open_handler)(const char *path, void *io_ctx) + ctypedef int (*rdata_close_handler)(void *io_ctx) + ctypedef rdata_off_t ( + *rdata_seek_handler + )(rdata_off_t offset, rdata_io_flags_t whence, void *io_ctx) + ctypedef ssize_t ( + *rdata_read_handler + )(void *buf, size_t nbyte, void *io_ctx) + ctypedef rdata_error_t ( + *rdata_update_handler + )( + long file_size, + rdata_progress_handler progress_handler, + void *user_ctx, + void *io_ctx + ) + + ctypedef struct rdata_io_t: + rdata_open_handler open + rdata_close_handler close + rdata_seek_handler seek + rdata_read_handler read + rdata_update_handler update + void *io_ctx + 
int external_io + + ctypedef struct rdata_parser_t: + rdata_table_handler table_handler + rdata_column_handler column_handler + rdata_column_name_handler column_name_handler + rdata_column_name_handler row_name_handler + rdata_text_value_handler text_value_handler + rdata_text_value_handler value_label_handler + rdata_column_handler dim_handler + rdata_text_value_handler dim_name_handler + rdata_error_handler error_handler + rdata_io_t *io + + cdef rdata_parser_t *rdata_parser_init() + cdef void rdata_parser_free(rdata_parser_t *parser) + + cdef rdata_error_t rdata_set_table_handler( + rdata_parser_t *parser, rdata_table_handler table_handler + ) + cdef rdata_error_t rdata_set_column_handler( + rdata_parser_t *parser, rdata_column_handler column_handler + ) + cdef rdata_error_t rdata_set_column_name_handler( + rdata_parser_t *parser, rdata_column_name_handler column_name_handler + ) + cdef rdata_error_t rdata_set_row_name_handler( + rdata_parser_t *parser, rdata_column_name_handler row_name_handler + ) + cdef rdata_error_t rdata_set_text_value_handler( + rdata_parser_t *parser, rdata_text_value_handler text_value_handler + ) + cdef rdata_error_t rdata_set_value_label_handler( + rdata_parser_t *parser, rdata_text_value_handler value_label_handler + ) + cdef rdata_error_t rdata_set_dim_handler( + rdata_parser_t *parser, rdata_column_handler dim_handler + ) + cdef rdata_error_t rdata_set_dim_name_handler( + rdata_parser_t *parser, rdata_text_value_handler dim_name_handler + ) + cdef rdata_error_t rdata_set_error_handler( + rdata_parser_t *parser, rdata_error_handler error_handler + ) + cdef rdata_error_t rdata_set_open_handler( + rdata_parser_t *parser, rdata_open_handler open_handler + ) + cdef rdata_error_t rdata_set_close_handler( + rdata_parser_t *parser, rdata_close_handler close_handler + ) + cdef rdata_error_t rdata_set_seek_handler( + rdata_parser_t *parser, rdata_seek_handler seek_handler + ) + cdef rdata_error_t rdata_set_read_handler( + rdata_parser_t 
*parser, rdata_read_handler read_handler + ) + cdef rdata_error_t rdata_set_update_handler( + rdata_parser_t *parser, rdata_update_handler update_handler + ) + cdef rdata_error_t rdata_set_io_ctx( + rdata_parser_t *parser, void *io_ctx + ) + cdef rdata_error_t rdata_parse( + rdata_parser_t *parser, const char *filename, void *user_ctx + ) + + # Write API + ctypedef ssize_t ( + *rdata_data_writer)(const void *data, size_t len, void *ctx + ) + + ctypedef struct rdata_column_t: + rdata_type_t type + int index + char name[256] + char label[1024] + + int32_t factor_count + char **factor + + ctypedef struct rdata_writer_t: + rdata_file_format_t file_format + rdata_data_writer data_writer + size_t bytes_written + + rdata_error_handler error_handler + void *user_ctx + + void *atom_table + int bswap + + rdata_column_t **columns + int32_t columns_count + int32_t columns_capacity + + cdef rdata_writer_t *rdata_writer_init( + rdata_data_writer write_callback, rdata_file_format_t format + ) + cdef void rdata_writer_free(rdata_writer_t *writer) + + cdef rdata_column_t *rdata_add_column( + rdata_writer_t *writer, const char *name, rdata_type_t type + ) + + cdef rdata_error_t rdata_column_set_label( + rdata_column_t *column, const char *label + ) + cdef rdata_error_t rdata_column_add_factor( + rdata_column_t *column, const char *factor + ) + + cdef rdata_column_t *rdata_get_column(rdata_writer_t *writer, int32_t j) + + cdef rdata_error_t rdata_begin_file(rdata_writer_t *writer, void *ctx) + cdef rdata_error_t rdata_begin_table( + rdata_writer_t *writer, const char *variable_name + ) + cdef rdata_error_t rdata_begin_column( + rdata_writer_t *writer, rdata_column_t *column, int32_t row_count + ) + + cdef rdata_error_t rdata_append_real_value( + rdata_writer_t *writer, double value + ) + cdef rdata_error_t rdata_append_int32_value( + rdata_writer_t *writer, int32_t value + ) + cdef rdata_error_t rdata_append_timestamp_value( + rdata_writer_t *writer, time_t value + ) + cdef 
rdata_error_t rdata_append_date_value( + rdata_writer_t *writer, tm *value + ) + cdef rdata_error_t rdata_append_logical_value( + rdata_writer_t *writer, int value + ) + cdef rdata_error_t rdata_append_string_value( + rdata_writer_t *writer, const char *value + ) + + cdef rdata_error_t rdata_end_column( + rdata_writer_t *writer, rdata_column_t *column + ) + cdef rdata_error_t rdata_end_table( + rdata_writer_t *writer, int32_t row_count, const char *datalabel + ) + cdef rdata_error_t rdata_end_file( + rdata_writer_t *writer + ) + + IF UNAME_SYSNAME == "Windows": + cdef extern from "<sys/stat.h>": + int _sopen(const char *path, int oflag, int shflag, int pmode) + + cdef extern from "<io.h>": + int _close(int fd) + ssize_t _write(int fd, const void *buf, size_t nbyte) + + cdef extern from "<fcntl.h>" nogil: + enum: _O_CREAT + enum: _O_WRONLY + enum: _O_BINARY + enum: _O_U8TEXT + enum: _SH_DENYNO + enum: _S_IREAD + enum: _S_IWRITE + + ELSE: + cdef extern from "<sys/stat.h>": + int open(const char *path, int oflag, int mode) + + cdef extern from "<unistd.h>": + int close(int fd) + ssize_t write(int fd, const void *buf, size_t nbyte) + + cdef extern from "<fcntl.h>" nogil: + enum: O_CREAT + enum: O_WRONLY diff --git a/pandas/io/rdata/_rdata.pyx b/pandas/io/rdata/_rdata.pyx new file mode 100644 index 0000000000000..33b5b41afc6d9 --- /dev/null +++ b/pandas/io/rdata/_rdata.pyx @@ -0,0 +1,461 @@ +# cython: c_string_type=str, c_string_encoding=utf8, language_level=3 + +cdef int handle_table(const char *name, void *ctx) except *: + """ + Retrieves original R object name. + + Called once per data frame in RData files, + and zero times on RDS files. 
+ """ + lbr = <LibrdataReader>ctx + + lbr.colidx = 0 + lbr.rows = 0 + lbr.rlevels = {} + lbr.rtext = {} + lbr.is_factor = False + lbr.rownames = {} + lbr.colnames = {} + lbr.dims = 0 + lbr.dim_str = {} + + if name != NULL: + lbr.tblname = name + + if "r_dataframe" in lbr.rvalues.keys(): + lbr.rvalues[lbr.tblname] = lbr.rvalues.pop("r_dataframe") + else: + lbr.rvalues[lbr.tblname] = { + "data": {}, + "dtypes": {}, + "colnames": None, + "rownames": None + } + return 0 # non-zero to abort processing + + +cdef int handle_column( + const char *name, + rdata_type_t dtype, + void *data, + long count, + void *ctx +) except *: + """ + Parses each non-string column in data frame. + + Called once for all columns with the following caveats: + * `name` is NULL for some columns (see handle_column_name below) + * `data` is NULL for text columns (see handle_text_value below) + Special conditon for matrices with dims attribute. + """ + lbr = <LibrdataReader>ctx + + lbr.rows = count + cdef int *rints = <int*>data + cdef double *rdoubles = <double*>data + + if dtype in [ + rdata_type_t.RDATA_TYPE_REAL, + rdata_type_t.RDATA_TYPE_DATE, + rdata_type_t.RDATA_TYPE_TIMESTAMP + ]: + lbr.rvalues[lbr.tblname]["dtypes"][lbr.colidx] = lbr.rtypes[dtype] + lbr.rvalues[lbr.tblname]["data"][lbr.colidx] = { + i: rdoubles[i] for i in range(count) + } + lbr.colidx += 1 + + elif dtype in [ + rdata_type_t.RDATA_TYPE_INT32, + rdata_type_t.RDATA_TYPE_LOGICAL + ]: + if lbr.is_factor: + lbr.rvalues[lbr.tblname]["dtypes"][lbr.colidx] = "factor" + lbr.rvalues[lbr.tblname]["data"][lbr.colidx] = { + i: float('nan') if rints[i] < 0 else lbr.rlevels[rints[i]-1] + for i in range(count) + } + lbr.is_factor = False + else: + lbr.rvalues[lbr.tblname]["dtypes"][lbr.colidx] = lbr.rtypes[dtype] + lbr.rvalues[lbr.tblname]["data"][lbr.colidx] = { + i: rints[i] for i in range(count) + } + lbr.colidx += 1 + + if lbr.dims > 0: + lbr.tblname = "r_matrix" + lbr.rvalues[lbr.tblname] = lbr.rvalues.pop("r_dataframe") + dim_data = 
list(lbr.rvalues[lbr.tblname]["data"][0].values()) + + n = 0 + rows, cols = lbr.dim_str.values() + for col in range(cols): + lbr.rvalues[lbr.tblname]["dtypes"][col] = lbr.rtypes[dtype] + lbr.rvalues[lbr.tblname]["data"][col] = { + i: d for i, d in enumerate(dim_data[n:n+rows]) + } + n += rows + + return 0 + +cdef int handle_text_value(const char *value, int index, void *ctx) except *: + """ + Parses string data. + + Called once per row for a text column. + """ + lbr = <LibrdataReader>ctx + + if value != NULL: + try: + lbr.rtext[index] = value + except UnicodeDecodeError: + lbr.rtext[index] = None + else: + lbr.rtext[index] = None + + if index == (lbr.rows - 1): + lbr.rvalues[lbr.tblname]["dtypes"][lbr.colidx] = "str" + lbr.rvalues[lbr.tblname]["data"][lbr.colidx] = lbr.rtext + lbr.colidx += 1 + lbr.rtext = {} + + return 0 + +cdef int handle_value_label(const char *value, int index, void *ctx) except *: + """ + Parses factor levels. + + Called for factor variables, once for each level + """ + lbr = <LibrdataReader>ctx + + lbr.is_factor = True + lbr.rlevels[index] = value + + return 0 + +cdef int handle_dim( + const char *name, + rdata_type_t dtype, + void *data, + long count, + void *ctx +) except *: + """ + Parses meta data on non-dataframe objects + + Called once for objects with R dims (matrices, arrays, etc.)). + Special condition for character matrices. 
+ """ + lbr = <LibrdataReader>ctx + + cdef int *rdims = <int*>data + + lbr.dims = count + lbr.dim_str = {i: rdims[i] for i in range(count)} + + if lbr.rvalues[lbr.tblname]["dtypes"] == {0: "str"}: + dim_data = list(lbr.rvalues[lbr.tblname]["data"][0].values()) + + n = 0 + rows, cols = lbr.dim_str.values() + + for col in range(cols): + lbr.rvalues[lbr.tblname]["dtypes"][col] = "str" + lbr.rvalues[lbr.tblname]["data"][col] = dim_data[n:n+rows] + n += rows + + return 0 + +cdef int handle_column_name(const char *name, int index, void *ctx) except *: + """ + Retrieves column names of data frame + + Returns only non-NULL column names after parsing data. + """ + lbr = <LibrdataReader>ctx + + lbr.colnames[index] = name + lbr.rvalues[lbr.tblname]["colnames"] = lbr.colnames + + return 0 + +cdef int handle_row_name(const char *name, int index, void *ctx) except *: + """ + Retrieves row names of data frame + + Returns only non-NULL row names appear after parsing data. + """ + lbr = <LibrdataReader>ctx + + lbr.rownames[index] = name + lbr.rvalues[lbr.tblname]["rownames"] = lbr.rownames + + return 0 + +cdef int handle_dim_name(const char *name, int index, void *ctx) except *: + """ + Retrieves dim names of matrices or arrays + + Returns only non-NULL dim names appear after parsing data. + """ + + lbr = <LibrdataReader>ctx + + if (index < lbr.dim_str[0]) and lbr.rownames.get(index) is None: + lbr.rownames[index] = name if name != NULL else str(index) + else: + lbr.rvalues[lbr.tblname]["rownames"] = lbr.rownames + + if index < lbr.dim_str[1]: + lbr.colnames[index] = name if name != NULL else str(index) + else: + lbr.rvalues[lbr.tblname]["colnames"] = lbr.colnames + + return 0 + + +class LibrdataReaderError(Exception): + """ + Base error class to capture exceptions in librdata parsing. + """ + pass + + +cdef int length = 40 + + +cdef class LibrdataReader: + """ + Base class to read RData files. 
+ + Class interfaces with librdata C library to builds dictionaries + of each data frame including data content and meta (dtypes, colnames, + and rownames). Callbacks above are used in ``rdata_`` method attributes. + """ + cdef rdata_parser_t *rparser + cdef public: + int colidx + int rows + dict rlevels + dict rtext + bint is_factor + dict rownames + dict colnames + dict rtypes + str tblname + dict rvalues + int dims + dict dim_str + + def read_rdata(self, rfile): + self.rparser = rdata_parser_init() + + self.colidx = 0 + self.rows = 0 + self.rlevels = {} + self.rtext = {} + self.is_factor = False + self.rownames = {} + self.colnames = {} + self.dims = 0 + self.dim_str = {} + self.rtypes = { + rdata_type_t.RDATA_TYPE_LOGICAL: "bool", + rdata_type_t.RDATA_TYPE_INT32: "int", + rdata_type_t.RDATA_TYPE_REAL: "float", + rdata_type_t.RDATA_TYPE_DATE: "date", + rdata_type_t.RDATA_TYPE_TIMESTAMP: "datetime", + rdata_type_t.RDATA_TYPE_STRING: "str" + } + self.tblname = "r_dataframe" + self.rvalues = { + "r_dataframe": { + "data": {}, + "dtypes": {}, + "colnames": None, + "rownames": None + } + } + + err = RDATA_OK + while err == RDATA_OK: + err = rdata_set_table_handler(self.rparser, handle_table) + err = rdata_set_dim_handler(self.rparser, handle_dim) + err = rdata_set_column_handler(self.rparser, handle_column) + err = rdata_set_text_value_handler(self.rparser, handle_text_value) + err = rdata_set_value_label_handler(self.rparser, handle_value_label) + err = rdata_set_column_name_handler(self.rparser, handle_column_name) + err = rdata_set_row_name_handler(self.rparser, handle_row_name) + err = rdata_set_dim_name_handler(self.rparser, handle_dim_name) + + err = rdata_parse(self.rparser, rfile, <void *>self) + rdata_parser_free(self.rparser) + break + + if err != RDATA_OK: + msg = rdata_error_message(err) + raise LibrdataReaderError(msg) + + return self.rvalues + + cdef bytes get_rparser(self): + return <bytes>(<char *>self.rparser)[:sizeof(rdata_parser_t)*length] + + def 
__reduce__(self): + rparser = self.get_rparser() + return (rebuild_reader, (rparser,)) + +cpdef object rebuild_reader(bytes data): + return LibrdataReader() + + +class LibrdataWriterError(Exception): + """ + Base error class to capture exceptions in librdata writing. + """ + pass + + +cdef ssize_t write_data(const void *bytes, size_t len, void *ctx): + cdef int fd = (<int*>ctx)[0] + + IF UNAME_SYSNAME == "Windows": + result = _write(fd, bytes, len) + ELSE: + result = write(fd, bytes, len) + + return result + +cdef class LibrdataWriter(): + """ + Base class to write RData files. + + Class interfaces with librdata C library to iterate through dictionaries + of each DataFrame column according to correspoinding dtype. + Single callback above is usedd in exposed `init`` method. + """ + cdef: + int fd + int row_count + dict rdict + dict rformats + dict rtypes + bytes file_name + bytes tbl_name + rdata_writer_t *writer + rdata_column_t *py_col + + cdef write_col_data(self, i, kdata, vdata, ktype, vtype): + py_col = rdata_get_column(self.writer, i) + rdata_begin_column(self.writer, py_col, self.row_count) + + if vtype == RDATA_TYPE_LOGICAL: + for k, v in vdata.items(): + rdata_append_logical_value(self.writer, v) + + if vtype == RDATA_TYPE_INT32: + for k, v in vdata.items(): + rdata_append_int32_value(self.writer, v) + + if vtype == RDATA_TYPE_REAL: + for k, v in vdata.items(): + rdata_append_real_value(self.writer, v) + + if vtype == RDATA_TYPE_TIMESTAMP: + for k, v in vdata.items(): + rdata_append_timestamp_value(self.writer, v) + + if vtype == RDATA_TYPE_STRING: + for k, v in vdata.items(): + if v == v: + rdata_append_string_value(self.writer, v) + else: + rdata_append_string_value(self.writer, NULL) + + rdata_end_column(self.writer, py_col) + + def write_rdata(self, rfile, rdict, rformat, tbl_name=None): + + self.rdict = rdict + self.file_name = rfile.encode("utf-8") + self.tbl_name = tbl_name.encode("utf-8") + self.row_count = len(next(iter(rdict["data"].items()))[1]) 
+ + self.rformats = { + "rdata": RDATA_WORKSPACE, + "rda": RDATA_WORKSPACE, + "rds": RDATA_SINGLE_OBJECT + } + + self.rtypes = { + "bool": RDATA_TYPE_LOGICAL, + "int": RDATA_TYPE_INT32, + "float": RDATA_TYPE_REAL, + "datetime": RDATA_TYPE_TIMESTAMP, + "object": RDATA_TYPE_STRING + } + + IF UNAME_SYSNAME == "Windows": + self.fd = _sopen( + self.file_name, + _O_CREAT | _O_WRONLY | _O_BINARY | _O_U8TEXT, + _SH_DENYNO, + _S_IREAD | _S_IWRITE + ) + ELSE: + self.fd = open(self.file_name, O_CREAT | O_WRONLY, 0644) + + self.writer = rdata_writer_init(write_data, self.rformats[rformat]) + + for k, v in self.rdict["dtypes"].items(): + rdata_add_column(self.writer, k, self.rtypes[v]) + + rdata_begin_file(self.writer, &self.fd) + rdata_begin_table(self.writer, self.tbl_name) + + try: + for n, ((kd, vd), (kt, vt)) in enumerate( + zip( + self.rdict["data"].items(), + self.rdict["dtypes"].items() + ) + ): + self.write_col_data(n, kd, vd, kt, self.rtypes[vt]) + + except (TypeError, ValueError, UnicodeDecodeError): + self.close_rdata() + raise LibrdataWriterError( + "DataFrame contains one more invalid types or data values. " + "that does not conform to R data types." 
+ ) + + rdata_end_table(self.writer, self.row_count, "pandas_dataframe") + rdata_end_file(self.writer) + + self.close_rdata() + rdata_writer_free(self.writer) + + cdef close_rdata(self): + IF UNAME_SYSNAME == "Windows": + _close(self.fd) + ELSE: + close(self.fd) + + cdef bytes get_writer(self): + return <bytes>(<char *>self.writer)[:sizeof(rdata_writer_t)*length] + + cdef bytes get_py_col(self): + return <bytes>(<char *>self.py_col)[:sizeof(rdata_column_t)*length] + + def __reduce__(self): + writer = self.get_writer() + py_col = self.get_py_col() + return (rebuild_writer, (writer, py_col)) + + +cpdef object rebuild_writer(bytes data1, bytes data2): + return LibrdataWriter() diff --git a/pandas/io/rdata/rdata_reader.py b/pandas/io/rdata/rdata_reader.py new file mode 100644 index 0000000000000..59f633119537c --- /dev/null +++ b/pandas/io/rdata/rdata_reader.py @@ -0,0 +1,457 @@ +""" +Read R data files (RData, rda, rds). + +This IO module interfaces with the librdata C library by Evan Miller: + https://github.com/WizardMac/librdata +""" +from __future__ import annotations + +import io +import os +from tempfile import TemporaryDirectory + +from pandas._typing import ( + Buffer, + CompressionOptions, + FilePathOrBuffer, + StorageOptions, +) +from pandas.util._decorators import doc + +from pandas.core.dtypes.common import is_list_like + +from pandas.core.api import to_datetime +from pandas.core.arrays import Categorical +from pandas.core.frame import ( + DataFrame, + Index, + Series, +) +from pandas.core.shared_docs import _shared_docs + +from pandas.io.common import ( + file_exists, + get_handle, + is_fsspec_url, + is_url, + stringify_path, +) +from pandas.io.rdata._rdata import LibrdataReader + + +@doc(storage_options=_shared_docs["storage_options"]) +def read_rdata( + path_or_buffer: FilePathOrBuffer, + file_format: str = "infer", + select_frames: list[str] | None = None, + rownames: bool = True, + compression: CompressionOptions = "gzip", + storage_options: 
StorageOptions = None, +) -> dict[str, DataFrame]: + r""" + Read R data (.RData, .rda, .rds) into DataFrame or ``dict`` of DataFrames. + + .. versionadded:: 1.3.0 + + Parameters + ---------- + path_or_buffer : str, path object, or file-like object + Any valid file path is acceptable. The string could be a URL. + Valid URL schemes include http, ftp, s3, and file. + + file_format : {{'infer', 'rdata', 'rda', 'rds'}}, default 'infer' + R serialization type as output from R's base::save or base::saveRDS + commands. Default 'infer' will use extension in file name to + to determine the format type. + + select_frames : list, default returns all DataFrames + Selected names of DataFrames to return from R RData and rdata types that + can contain multiple objects. + + rownames : bool, default True + Include original rownames in R data frames to map into a DataFrame index. + + compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'gzip' + For on-the-fly decompression of on-disk data. If 'infer', then use + gzip, bz2, zip or xz if path_or_buffer is a string ending in + '.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression + otherwise. If using 'zip', the ZIP file must contain only one data + file to be read in. Set to None for no decompression. This method will + default to 'gzip' since 'gzip2` is the default compression in R for + RData and rds types. + + {storage_options} + + Returns + ------- + Dict of DataFrames + Depends on R data type where rds formats returns a ``dict`` of a single + DataFrame and RData or rda formats can return ``dict`` of one or more + DataFrames. + + See Also + -------- + read_sas : Read SAS datasets into DataFrame. + read_stata : Read Stata datasets into DataFrame. + read_spss : Read SPSS datasets into DataFrame. + + Notes + ----- + Any R data file that contains a non-data.frame object may raise parsing errors. + Method will return data.frame and data.frame like objects such as tibbles and + data.tables. 
For more information of R serialization data types, see docs on + `rds`<https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/readRDS>__ + and `rda`<https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/save>__ + formats. + + Examples + -------- + For an .rds file which only contains a single R object, method returns a + DataFrame: + + >>> R_code = ''' + ... ghg_df <- data.frame( + ... gas = c('Carbon dioxide', + ... 'Methane', + ... 'Nitrous oxide', + ... 'Fluorinated gases', + ... 'Total'), + ... year = c(2018, + ... 2018, + ... 2018, + ... 2018, + ... 2018), + ... emissions = c(5424.88, + ... 634.46, + ... 434.53, + ... 182.78, + ... 6676.65) + ... ) + ... saveRDS(ghg_df, file="ghg_df.rds") + ... ''' + + >>> ghg_df = pd.read_rdata("ghg_df.rds") # doctest: +SKIP + >>> ghg_df # doctest: +SKIP + {{'r_dataframe': + gas year emissions + rownames + 1 Carbon dioxide 2018 5424.88 + 2 Methane 2018 634.46 + 3 Nitrous oxide 2018 434.53 + 4 Fluorinated gases 2018 182.79 + 5 Total 2018 6676.65}} + + For an .RData or .rda file which can contain multiple R objects, method + returns a ``dict`` of DataFrames: + + >>> R_code = ''' + ... plants_df <- pd.DataFrame( + ... plant_group = c('Pteridophytes', + ... 'Pteridophytes', + ... 'Pteridophytes', + ... 'Pteridophytes', + ... 'Pteridophytes'), + ... status = c('Data Deficient', + ... 'Extinct', + ... 'Not Threatened', + ... 'Possibly Threatened', + ... 'Threatened'), + ... count = c(398, 65, 1294, 408, 1275) + ... ) + ... sea_ice_df <- pd.DataFrame( + ... year = c(2016, 2017, 2018, 2019, 2020), + ... mo = c(12, 12, 12, 12, 12], + ... data.type: c('Goddard', + ... 'Goddard', + ... 'Goddard', + ... 'Goddard', + ... 'NRTSI-G'), + ... region = c('S', 'S', 'S', 'S', 'S'), + ... extent = c(8.28, 9.48, 9.19, 9.41, 10.44), + ... area = c(5.51, 6.23, 5.59, 6.59, 6.5) + ... ) + ... save(ghg_df, plants_df, sea_ice_df, file="env_data_dfs.rda") + ... 
''' + + >>> env_dfs = pd.read_rdata("env_data_dfs.rda") # doctest: +SKIP + >>> env_dfs # doctest: +SKIP + {{'ghg_df': + gas year emissions + rownames + 1 Carbon dioxide 2018 5424.88 + 2 Methane 2018 634.46 + 3 Nitrous oxide 2018 434.53 + 4 Fluorinated gases 2018 182.79 + 5 Total 2018 6676.65, + 'plants_df': + plant_group status count + rownames + 1 Pteridophytes Data Deficient 398 + 2 Pteridophytes Extinct 65 + 3 Pteridophytes Not Threatened 1294 + 4 Pteridophytes Possibly Threatened 408 + 5 Pteridophytes Threatened 1275, + 'sea_ice_df': + year mo data.type region extent area + rownames + 1 2016 12 Goddard S 8.28 5.51 + 2 2017 12 Goddard S 9.48 6.23 + 3 2018 12 Goddard S 9.19 5.59 + 4 2019 12 Goddard S 9.41 6.59 + 5 2020 12 NRTSI-G S 10.44 6.50}} + """ + + rdr = _RDataReader( + path_or_buffer, + file_format, + select_frames, + rownames, + compression, + storage_options, + ) + + r_dfs = rdr.parse_data() + + return r_dfs + + +def get_data_from_filepath( + filepath_or_buffer, + encoding, + compression, + storage_options, +) -> str | bytes | Buffer: + """ + Extract raw R data. + + The method accepts three input types: + 1. filepath (string-like) + 2. file-like object (e.g. open file object, BytesIO) + 3. R data file in ascii or binary content + + This method turns (1) into (2) to simplify the rest of the processing. + It returns input types (2) and (3) unchanged. 
+ """ + filepath_or_buffer = stringify_path(filepath_or_buffer) + + if ( + not isinstance(filepath_or_buffer, str) + or is_url(filepath_or_buffer) + or is_fsspec_url(filepath_or_buffer) + or file_exists(filepath_or_buffer) + ): + with get_handle( + filepath_or_buffer, + "rb", + encoding=encoding, + compression=compression, + storage_options=storage_options, + is_text=False, + ) as handle_obj: + filepath_or_buffer = ( + handle_obj.handle.read() + if hasattr(handle_obj.handle, "read") + else handle_obj.handle + ) + else: + raise FileNotFoundError(f"{filepath_or_buffer} file cannot be found.") + + return filepath_or_buffer + + +def preprocess_data(data) -> io.StringIO | io.BytesIO: + """ + Convert extracted raw data. + + This method will return underlying data of extracted R data formats. + The data either has a `read` attribute (e.g. a file object or a + StringIO/BytesIO) or is bytes that represents the R data. + """ + + if isinstance(data, str): + data = io.StringIO(data) + + elif isinstance(data, bytes): + data = io.BytesIO(data) + + return data + + +class _RDataReader: + """ + Internal subclass to parse R data files into dict of DataFrames. + + Parameters + ---------- + path_or_buffer : a valid str, path object or file-like object + Any valid string path is acceptable. The string could be a URL. Valid + URL schemes include http, ftp, s3, and file. + + file_format : {{'infer', 'rdata', 'rda', 'rds'}}, default 'infer' + R serialization type. + + select_frames : list, default None + Selected names of DataFrames to return from R data. + + rownames : bool, default True + Include original rownames in R data frames. + + compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' + Compression type for on-the-fly decompression of on-disk data. + If 'infer', then use extension for gzip, bz2, zip or xz. + + storage_options : dict, optional + Extra options that make sense for a particular storage connection, + e.g. host, port, username, password, etc. 
+ """ + + def __init__( + self, + path_or_buffer, + file_format, + select_frames, + rownames, + compression, + storage_options, + ) -> None: + self.path_or_buffer = path_or_buffer + self.file_format = file_format.lower() + self.select_frames = select_frames + self.rownames = rownames + self.compression = compression + self.storage_options = storage_options + self.verify_params() + + def verify_params(self) -> None: + """ + Verify user entries of parameters. + + This method will check the values and types of select parameters + and raise appropriate errors. + """ + + path_ext: str | None = ( + os.path.splitext(self.path_or_buffer.lower())[1][1:] + if isinstance(self.path_or_buffer, str) + else None + ) + + if self.file_format not in ["infer", "rdata", "rda", "rds"]: + raise ValueError( + f"'{self.file_format}' is not a valid value for file_format" + ) + + if ( + self.file_format == "infer" + and isinstance(self.path_or_buffer, str) + and path_ext not in ["rdata", "rda", "rds"] + ) or (self.file_format == "infer" and not isinstance(self.path_or_buffer, str)): + raise ValueError( + f"Unable to infer file format from file name: {self.path_or_buffer}. " + "Please use known R data type (rdata, rda, rds)." + ) + + if self.file_format == "infer" and isinstance(path_ext, str): + self.file_format = path_ext + + if self.select_frames is not None and not is_list_like(self.select_frames): + raise TypeError( + f"{type(self.select_frames).__name__} is " + "not a valid type for select_frames" + ) + + def buffer_to_disk(self, tmp_dir: str) -> str: + """ + Convert path or buffer to disk file. + + This method will convert path_or_buffer to temp file + to parse RData from disk. 
+ """ + + r_temp = os.path.join(tmp_dir, "rdata.rda") + + handle_data = get_data_from_filepath( + filepath_or_buffer=self.path_or_buffer, + encoding="utf-8", + compression=self.compression, + storage_options=self.storage_options, + ) + + with preprocess_data(handle_data) as r_data: + if isinstance(r_data, io.BytesIO): + with open(r_temp, "wb") as f: + f.write(r_data.read()) + + return r_temp + + def build_frame(self, data_dict: dict) -> DataFrame: + """ + Builds DataFrame from raw, nested parsed RData dict. + + Converts special class variables (bools, factors, dates, datetimes), + then binds all columns together with DataFrame constructor. + """ + + final_dict = { + k: Series(v) + for k, v in data_dict["data"].items() + if k not in ["dtypes", "colnames", "rownames"] + } + + rdf = DataFrame(data=final_dict) + + for col, dtype in data_dict["dtypes"].items(): + if dtype == "bool": + rdf[col] = rdf[col].astype(bool) + + if dtype == "factor": + rdf[col] = Categorical(rdf[col]) + + if dtype == "date": + rdf[col] = to_datetime(rdf[col], unit="d") + + if dtype == "datetime": + rdf[col] = to_datetime(rdf[col], unit="s") + + colnames = ( + None + if data_dict["colnames"] is None + else list(data_dict["colnames"].values()) + ) + if colnames is not None: + rdf.columns = Index(colnames) + + rownames = ( + None + if data_dict["rownames"] is None + else list(data_dict["rownames"].values()) + ) + if self.rownames: + if rownames is not None: + rdf.index = Index(rownames) + else: + rdf.index += 1 + rdf.index.name = "rownames" + + return rdf + + def parse_data(self) -> dict[str, DataFrame]: + """ + Parse R data files into DataFrames + + This method will retrieve dictionary of R data and build + DataFrame for each item in data file + """ + + lbr = LibrdataReader() + + with TemporaryDirectory() as tmp_dir: + r_temp = self.buffer_to_disk(tmp_dir) + rdict = lbr.read_rdata(r_temp) + + r_dfs = {k: self.build_frame(v) for k, v in rdict.items()} + + if self.select_frames: + r_dfs = {k: v for 
k, v in r_dfs.items() if k in self.select_frames} + + return r_dfs diff --git a/pandas/io/rdata/rdata_writer.py b/pandas/io/rdata/rdata_writer.py new file mode 100644 index 0000000000000..5bb5b4f3f3b90 --- /dev/null +++ b/pandas/io/rdata/rdata_writer.py @@ -0,0 +1,189 @@ +""" +write R data files (RData, rda, rds). + +This IO module interfaces with the librdata C library by Evan Miller: + https://github.com/WizardMac/librdata +""" +from __future__ import annotations + +import os +from tempfile import TemporaryDirectory + +from pandas._typing import ( + CompressionOptions, + FilePathOrBuffer, + StorageOptions, +) + +from pandas.core.frame import DataFrame + +from pandas.io.common import get_handle +from pandas.io.rdata._rdata import LibrdataWriter + + +class RDataWriter: + """ + Subclass to write pandas DataFrames into R data files. + + Parameters + ---------- + path_or_buffer : a valid str, path object or file-like object + Any valid string path is acceptable. + + file_format : {{'infer', 'rdata', 'rda', 'rds'}}, default 'infer' + R serialization type. + + rda_name : str, default "pandas_dataframe" + Name for exported DataFrame in rda file. + + index : bool, default True + Include index or MultiIndex in output as separate columns. + + compression : {'gzip', 'bz2', 'xz', None}, default 'gzip' + Compression type for on-the-fly decompression of on-disk data. + + storage_options : dict, optional + Extra options that make sense for a particular storage connection, + e.g. host, port, username, password, etc. 
+ """ + + def __init__( + self, + frame: DataFrame, + path_or_buffer: FilePathOrBuffer, + file_format: str = "infer", + rda_name: str = "pandas_dataframe", + index: bool = True, + compression: CompressionOptions = "gzip", + storage_options: StorageOptions = None, + ) -> None: + self.frame = frame + self.path_or_buffer = path_or_buffer + self.file_format = file_format.lower() + self.rda_name = rda_name + self.index = index + self.compression = compression + self.storage_options = storage_options + self.verify_params() + + def verify_params(self) -> None: + """ + Verify user entries of parameters. + + This method will check the values and types of select parameters + and raise appropriate errors. + """ + + path_ext: str | None = ( + os.path.splitext(self.path_or_buffer.lower())[1][1:] + if isinstance(self.path_or_buffer, str) + else None + ) + + if self.file_format not in ["infer", "rdata", "rda", "rds"]: + raise ValueError( + f"{self.file_format} is not a valid value for file_format." + ) + + if ( + self.file_format == "infer" + and isinstance(self.path_or_buffer, str) + and path_ext not in ["rdata", "rda", "rds"] + ): + raise ValueError( + f"Unable to infer file format from file name: {self.path_or_buffer}" + "Please use known R data type (rdata, rda, rds)." + ) + + if self.file_format == "infer" and isinstance(path_ext, str): + self.file_format = path_ext + + if self.compression is not None and self.compression not in [ + "gzip", + "bz2", + "xz", + ]: + raise ValueError( + f"{self.compression} is not a supported value for compression." + ) + + def disk_to_buffer(self, r_file: str) -> None: + """ + Save temp file to path or buffer. + + This method will convert written R data to path_or_buffer. 
+ """ + + with open(r_file, "rb") as rdata: + with get_handle( + self.path_or_buffer, + "wb", + compression=self.compression, + storage_options=self.storage_options, + is_text=False, + ) as handles: + handles.handle.write(rdata.read()) # type: ignore[arg-type] + + return None + + def write_data(self) -> None: + """ + Write DataFrames to R data files. + + Converts non-primitive and non-datetimes to object to align to R + atomic types, then exports dictionaries of each column with meta data. + """ + + self.frame = ( + self.frame.reset_index() + if self.index + else self.frame.reset_index(drop=True) + ) + + excl_types = ["bool", "number", "object", "datetime", "datetimetz", "timedelta"] + for col in self.frame.select_dtypes(exclude=excl_types).columns: + self.frame[col] = self.frame[col].astype(str) + + for col in self.frame.select_dtypes(include=["datetimetz"]).columns: + self.frame[col] = self.frame[col].dt.tz_localize(None) + + for col in self.frame.select_dtypes(include=["timedelta"]).columns: + self.frame[col] = self.frame[col].dt.total_seconds() + + rdict = {"dtypes": {k: str(v) for k, v in self.frame.dtypes.to_dict().items()}} + + for k, v in rdict["dtypes"].items(): + if any(x in v for x in ("bool", "Boolean")): + rdict["dtypes"][k] = "bool" + + elif any(x in v for x in ("int", "uint", "Int", "UInt")): + rdict["dtypes"][k] = "int" + + elif any(x in v for x in ("float", "Float")): + rdict["dtypes"][k] = "float" + + elif any(x in v for x in ("datetime", "Datetime")): + rdict["dtypes"][k] = "datetime" + + elif any(x in v for x in ("object", "string", "String")): + rdict["dtypes"][k] = "object" + + for col in self.frame.select_dtypes(include=["datetime"]).columns: + self.frame[col] = self.frame[col].values.view("int64") / (10 ** 9) + + rdict["data"] = self.frame.to_dict() + + lbw = LibrdataWriter() + + with TemporaryDirectory() as tmp_dir: + r_temp = os.path.join(tmp_dir, "rdata.rda") + lbw.write_rdata( + rfile=r_temp, + rdict=rdict, + rformat=self.file_format, + 
tbl_name=self.rda_name, + ) + + self.disk_to_buffer(r_temp) + + return None diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index 38984238ecf65..225b4805717c3 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -163,6 +163,7 @@ class TestPDApi(Base): "read_xml", "read_json", "read_pickle", + "read_rdata", "read_sas", "read_sql", "read_sql_query", diff --git a/pandas/tests/io/data/rdata/climate_non_utf8_df.rda b/pandas/tests/io/data/rdata/climate_non_utf8_df.rda new file mode 100644 index 0000000000000..a506806405f5e Binary files /dev/null and b/pandas/tests/io/data/rdata/climate_non_utf8_df.rda differ diff --git a/pandas/tests/io/data/rdata/climate_non_utf8_df.rds b/pandas/tests/io/data/rdata/climate_non_utf8_df.rds new file mode 100644 index 0000000000000..85a65550ad80f Binary files /dev/null and b/pandas/tests/io/data/rdata/climate_non_utf8_df.rds differ diff --git a/pandas/tests/io/data/rdata/env_data_dfs.rda b/pandas/tests/io/data/rdata/env_data_dfs.rda new file mode 100644 index 0000000000000..07fbef3ecb00d Binary files /dev/null and b/pandas/tests/io/data/rdata/env_data_dfs.rda differ diff --git a/pandas/tests/io/data/rdata/env_data_non_dfs.rda b/pandas/tests/io/data/rdata/env_data_non_dfs.rda new file mode 100644 index 0000000000000..e1b6bbb5e117e Binary files /dev/null and b/pandas/tests/io/data/rdata/env_data_non_dfs.rda differ diff --git a/pandas/tests/io/data/rdata/env_data_objs.rda b/pandas/tests/io/data/rdata/env_data_objs.rda new file mode 100644 index 0000000000000..61731d7774e45 Binary files /dev/null and b/pandas/tests/io/data/rdata/env_data_objs.rda differ diff --git a/pandas/tests/io/data/rdata/ghg_df.rds b/pandas/tests/io/data/rdata/ghg_df.rds new file mode 100644 index 0000000000000..18c91b7acf9d7 Binary files /dev/null and b/pandas/tests/io/data/rdata/ghg_df.rds differ diff --git a/pandas/tests/io/data/rdata/ghg_t_tests.rds b/pandas/tests/io/data/rdata/ghg_t_tests.rds new file mode 100644 
index 0000000000000..e58879d33c1c8 Binary files /dev/null and b/pandas/tests/io/data/rdata/ghg_t_tests.rds differ diff --git a/pandas/tests/io/data/rdata/planetary_boundaries_df.rda b/pandas/tests/io/data/rdata/planetary_boundaries_df.rda new file mode 100644 index 0000000000000..0c3308434ccbb Binary files /dev/null and b/pandas/tests/io/data/rdata/planetary_boundaries_df.rda differ diff --git a/pandas/tests/io/data/rdata/planetary_boundaries_df.rds b/pandas/tests/io/data/rdata/planetary_boundaries_df.rds new file mode 100644 index 0000000000000..b370d2bd13785 Binary files /dev/null and b/pandas/tests/io/data/rdata/planetary_boundaries_df.rds differ diff --git a/pandas/tests/io/data/rdata/plants_arry.rds b/pandas/tests/io/data/rdata/plants_arry.rds new file mode 100644 index 0000000000000..e1d7032acebeb Binary files /dev/null and b/pandas/tests/io/data/rdata/plants_arry.rds differ diff --git a/pandas/tests/io/data/rdata/plants_df.rds b/pandas/tests/io/data/rdata/plants_df.rds new file mode 100644 index 0000000000000..5b9f58f6483ba Binary files /dev/null and b/pandas/tests/io/data/rdata/plants_df.rds differ diff --git a/pandas/tests/io/data/rdata/ppm_df.csv b/pandas/tests/io/data/rdata/ppm_df.csv new file mode 100644 index 0000000000000..4a2663110dca3 --- /dev/null +++ b/pandas/tests/io/data/rdata/ppm_df.csv @@ -0,0 +1,757 @@ +"year","month","decimal_date","monthly_average","de_seasonalized","num_days","st_dev_of_days","unc_mon_mean" +1958,3,1958.2027,315.7,314.43,-1,-9.99,-0.99 +1958,4,1958.2877,317.45,315.16,-1,-9.99,-0.99 +1958,5,1958.3699,317.51,314.71,-1,-9.99,-0.99 +1958,6,1958.4548,317.24,315.14,-1,-9.99,-0.99 +1958,7,1958.537,315.86,315.18,-1,-9.99,-0.99 +1958,8,1958.6219,314.93,316.18,-1,-9.99,-0.99 +1958,9,1958.7068,313.2,316.08,-1,-9.99,-0.99 +1958,10,1958.789,312.43,315.41,-1,-9.99,-0.99 +1958,11,1958.874,313.33,315.2,-1,-9.99,-0.99 +1958,12,1958.9562,314.67,315.43,-1,-9.99,-0.99 +1959,1,1959.0411,315.58,315.55,-1,-9.99,-0.99 
+1959,2,1959.126,316.48,315.86,-1,-9.99,-0.99 +1959,3,1959.2027,316.65,315.38,-1,-9.99,-0.99 +1959,4,1959.2877,317.72,315.41,-1,-9.99,-0.99 +1959,5,1959.3699,318.29,315.49,-1,-9.99,-0.99 +1959,6,1959.4548,318.15,316.03,-1,-9.99,-0.99 +1959,7,1959.537,316.54,315.86,-1,-9.99,-0.99 +1959,8,1959.6219,314.8,316.06,-1,-9.99,-0.99 +1959,9,1959.7068,313.84,316.73,-1,-9.99,-0.99 +1959,10,1959.789,313.33,316.33,-1,-9.99,-0.99 +1959,11,1959.874,314.81,316.68,-1,-9.99,-0.99 +1959,12,1959.9562,315.58,316.35,-1,-9.99,-0.99 +1960,1,1960.041,316.43,316.4,-1,-9.99,-0.99 +1960,2,1960.1257,316.98,316.36,-1,-9.99,-0.99 +1960,3,1960.2049,317.58,316.28,-1,-9.99,-0.99 +1960,4,1960.2896,319.03,316.7,-1,-9.99,-0.99 +1960,5,1960.3716,320.04,317.22,-1,-9.99,-0.99 +1960,6,1960.4563,319.59,317.47,-1,-9.99,-0.99 +1960,7,1960.5383,318.18,317.52,-1,-9.99,-0.99 +1960,8,1960.623,315.9,317.19,-1,-9.99,-0.99 +1960,9,1960.7077,314.17,317.08,-1,-9.99,-0.99 +1960,10,1960.7896,313.83,316.83,-1,-9.99,-0.99 +1960,11,1960.8743,315,316.88,-1,-9.99,-0.99 +1960,12,1960.9563,316.19,316.96,-1,-9.99,-0.99 +1961,1,1961.0411,316.89,316.86,-1,-9.99,-0.99 +1961,2,1961.126,317.7,317.08,-1,-9.99,-0.99 +1961,3,1961.2027,318.54,317.26,-1,-9.99,-0.99 +1961,4,1961.2877,319.48,317.16,-1,-9.99,-0.99 +1961,5,1961.3699,320.58,317.76,-1,-9.99,-0.99 +1961,6,1961.4548,319.77,317.63,-1,-9.99,-0.99 +1961,7,1961.537,318.57,317.88,-1,-9.99,-0.99 +1961,8,1961.6219,316.79,318.06,-1,-9.99,-0.99 +1961,9,1961.7068,314.99,317.9,-1,-9.99,-0.99 +1961,10,1961.789,315.31,318.32,-1,-9.99,-0.99 +1961,11,1961.874,316.1,317.99,-1,-9.99,-0.99 +1961,12,1961.9562,317.01,317.79,-1,-9.99,-0.99 +1962,1,1962.0411,317.94,317.91,-1,-9.99,-0.99 +1962,2,1962.126,318.55,317.92,-1,-9.99,-0.99 +1962,3,1962.2027,319.68,318.39,-1,-9.99,-0.99 +1962,4,1962.2877,320.57,318.24,-1,-9.99,-0.99 +1962,5,1962.3699,321.02,318.18,-1,-9.99,-0.99 +1962,6,1962.4548,320.62,318.47,-1,-9.99,-0.99 +1962,7,1962.537,319.61,318.92,-1,-9.99,-0.99 
+1962,8,1962.6219,317.4,318.68,-1,-9.99,-0.99 +1962,9,1962.7068,316.25,319.17,-1,-9.99,-0.99 +1962,10,1962.789,315.42,318.45,-1,-9.99,-0.99 +1962,11,1962.874,316.69,318.58,-1,-9.99,-0.99 +1962,12,1962.9562,317.7,318.47,-1,-9.99,-0.99 +1963,1,1963.0411,318.74,318.7,-1,-9.99,-0.99 +1963,2,1963.126,319.07,318.44,-1,-9.99,-0.99 +1963,3,1963.2027,319.86,318.57,-1,-9.99,-0.99 +1963,4,1963.2877,321.38,319.05,-1,-9.99,-0.99 +1963,5,1963.3699,322.25,319.4,-1,-9.99,-0.99 +1963,6,1963.4548,321.48,319.32,-1,-9.99,-0.99 +1963,7,1963.537,319.74,319.05,-1,-9.99,-0.99 +1963,8,1963.6219,317.77,319.05,-1,-9.99,-0.99 +1963,9,1963.7068,316.21,319.14,-1,-9.99,-0.99 +1963,10,1963.789,315.99,319.02,-1,-9.99,-0.99 +1963,11,1963.874,317.07,318.97,-1,-9.99,-0.99 +1963,12,1963.9562,318.35,319.13,-1,-9.99,-0.99 +1964,1,1964.041,319.57,319.54,-1,-9.99,-0.99 +1964,2,1964.1257,320.01,319.37,-1,-9.99,-0.99 +1964,3,1964.2049,320.74,319.41,-1,-9.99,-0.99 +1964,4,1964.2896,321.84,319.45,-1,-9.99,-0.99 +1964,5,1964.3716,322.26,319.4,-1,-9.99,-0.99 +1964,6,1964.4563,321.89,319.75,-1,-9.99,-0.99 +1964,7,1964.5383,320.44,319.77,-1,-9.99,-0.99 +1964,8,1964.623,318.69,320,-1,-9.99,-0.99 +1964,9,1964.7077,316.7,319.66,-1,-9.99,-0.99 +1964,10,1964.7896,316.87,319.91,-1,-9.99,-0.99 +1964,11,1964.8743,317.68,319.58,-1,-9.99,-0.99 +1964,12,1964.9563,318.71,319.49,-1,-9.99,-0.99 +1965,1,1965.0411,319.44,319.4,-1,-9.99,-0.99 +1965,2,1965.126,320.44,319.81,-1,-9.99,-0.99 +1965,3,1965.2027,320.89,319.59,-1,-9.99,-0.99 +1965,4,1965.2877,322.14,319.78,-1,-9.99,-0.99 +1965,5,1965.3699,322.17,319.3,-1,-9.99,-0.99 +1965,6,1965.4548,321.87,319.7,-1,-9.99,-0.99 +1965,7,1965.537,321.21,320.51,-1,-9.99,-0.99 +1965,8,1965.6219,318.87,320.15,-1,-9.99,-0.99 +1965,9,1965.7068,317.81,320.77,-1,-9.99,-0.99 +1965,10,1965.789,317.3,320.36,-1,-9.99,-0.99 +1965,11,1965.874,318.87,320.78,-1,-9.99,-0.99 +1965,12,1965.9562,319.42,320.2,-1,-9.99,-0.99 +1966,1,1966.0411,320.62,320.59,-1,-9.99,-0.99 
+1966,2,1966.126,321.6,320.96,-1,-9.99,-0.99 +1966,3,1966.2027,322.39,321.08,-1,-9.99,-0.99 +1966,4,1966.2877,323.7,321.34,-1,-9.99,-0.99 +1966,5,1966.3699,324.08,321.2,-1,-9.99,-0.99 +1966,6,1966.4548,323.75,321.57,-1,-9.99,-0.99 +1966,7,1966.537,322.38,321.68,-1,-9.99,-0.99 +1966,8,1966.6219,320.36,321.65,-1,-9.99,-0.99 +1966,9,1966.7068,318.64,321.6,-1,-9.99,-0.99 +1966,10,1966.789,318.1,321.17,-1,-9.99,-0.99 +1966,11,1966.874,319.78,321.7,-1,-9.99,-0.99 +1966,12,1966.9562,321.03,321.81,-1,-9.99,-0.99 +1967,1,1967.0411,322.33,322.29,-1,-9.99,-0.99 +1967,2,1967.126,322.5,321.86,-1,-9.99,-0.99 +1967,3,1967.2027,323.04,321.73,-1,-9.99,-0.99 +1967,4,1967.2877,324.42,322.04,-1,-9.99,-0.99 +1967,5,1967.3699,325,322.12,-1,-9.99,-0.99 +1967,6,1967.4548,324.09,321.91,-1,-9.99,-0.99 +1967,7,1967.537,322.54,321.84,-1,-9.99,-0.99 +1967,8,1967.6219,320.92,322.21,-1,-9.99,-0.99 +1967,9,1967.7068,319.25,322.23,-1,-9.99,-0.99 +1967,10,1967.789,319.39,322.47,-1,-9.99,-0.99 +1967,11,1967.874,320.73,322.65,-1,-9.99,-0.99 +1967,12,1967.9562,321.96,322.75,-1,-9.99,-0.99 +1968,1,1968.041,322.57,322.54,-1,-9.99,-0.99 +1968,2,1968.1257,323.15,322.51,-1,-9.99,-0.99 +1968,3,1968.2049,323.89,322.55,-1,-9.99,-0.99 +1968,4,1968.2896,325.02,322.62,-1,-9.99,-0.99 +1968,5,1968.3716,325.57,322.68,-1,-9.99,-0.99 +1968,6,1968.4563,325.36,323.19,-1,-9.99,-0.99 +1968,7,1968.5383,324.14,323.46,-1,-9.99,-0.99 +1968,8,1968.623,322.11,323.43,-1,-9.99,-0.99 +1968,9,1968.7077,320.33,323.32,-1,-9.99,-0.99 +1968,10,1968.7896,320.25,323.33,-1,-9.99,-0.99 +1968,11,1968.8743,321.32,323.25,-1,-9.99,-0.99 +1968,12,1968.9563,322.89,323.69,-1,-9.99,-0.99 +1969,1,1969.0411,324,323.97,-1,-9.99,-0.99 +1969,2,1969.126,324.42,323.77,-1,-9.99,-0.99 +1969,3,1969.2027,325.63,324.31,-1,-9.99,-0.99 +1969,4,1969.2877,326.66,324.27,-1,-9.99,-0.99 +1969,5,1969.3699,327.38,324.48,-1,-9.99,-0.99 +1969,6,1969.4548,326.71,324.51,-1,-9.99,-0.99 +1969,7,1969.537,325.88,325.17,-1,-9.99,-0.99 
+1969,8,1969.6219,323.66,324.97,-1,-9.99,-0.99 +1969,9,1969.7068,322.38,325.37,-1,-9.99,-0.99 +1969,10,1969.789,321.78,324.88,-1,-9.99,-0.99 +1969,11,1969.874,322.86,324.79,-1,-9.99,-0.99 +1969,12,1969.9562,324.12,324.91,-1,-9.99,-0.99 +1970,1,1970.0411,325.06,325.03,-1,-9.99,-0.99 +1970,2,1970.126,325.98,325.34,-1,-9.99,-0.99 +1970,3,1970.2027,326.93,325.61,-1,-9.99,-0.99 +1970,4,1970.2877,328.13,325.74,-1,-9.99,-0.99 +1970,5,1970.3699,328.08,325.16,-1,-9.99,-0.99 +1970,6,1970.4548,327.67,325.46,-1,-9.99,-0.99 +1970,7,1970.537,326.34,325.63,-1,-9.99,-0.99 +1970,8,1970.6219,324.69,325.99,-1,-9.99,-0.99 +1970,9,1970.7068,323.1,326.1,-1,-9.99,-0.99 +1970,10,1970.789,323.06,326.18,-1,-9.99,-0.99 +1970,11,1970.874,324.01,325.95,-1,-9.99,-0.99 +1970,12,1970.9562,325.13,325.93,-1,-9.99,-0.99 +1971,1,1971.0411,326.17,326.14,-1,-9.99,-0.99 +1971,2,1971.126,326.68,326.03,-1,-9.99,-0.99 +1971,3,1971.2027,327.17,325.85,-1,-9.99,-0.99 +1971,4,1971.2877,327.79,325.38,-1,-9.99,-0.99 +1971,5,1971.3699,328.93,326,-1,-9.99,-0.99 +1971,6,1971.4548,328.57,326.36,-1,-9.99,-0.99 +1971,7,1971.537,327.36,326.65,-1,-9.99,-0.99 +1971,8,1971.6219,325.43,326.74,-1,-9.99,-0.99 +1971,9,1971.7068,323.36,326.37,-1,-9.99,-0.99 +1971,10,1971.789,323.56,326.69,-1,-9.99,-0.99 +1971,11,1971.874,324.8,326.75,-1,-9.99,-0.99 +1971,12,1971.9562,326.01,326.82,-1,-9.99,-0.99 +1972,1,1972.041,326.77,326.73,-1,-9.99,-0.99 +1972,2,1972.1257,327.63,326.98,-1,-9.99,-0.99 +1972,3,1972.2049,327.75,326.39,-1,-9.99,-0.99 +1972,4,1972.2896,329.72,327.29,-1,-9.99,-0.99 +1972,5,1972.3716,330.07,327.14,-1,-9.99,-0.99 +1972,6,1972.4563,329.09,326.88,-1,-9.99,-0.99 +1972,7,1972.5383,328.04,327.36,-1,-9.99,-0.99 +1972,8,1972.623,326.32,327.67,-1,-9.99,-0.99 +1972,9,1972.7077,324.84,327.87,-1,-9.99,-0.99 +1972,10,1972.7896,325.2,328.33,-1,-9.99,-0.99 +1972,11,1972.8743,326.5,328.45,-1,-9.99,-0.99 +1972,12,1972.9563,327.55,328.36,-1,-9.99,-0.99 +1973,1,1973.0411,328.55,328.51,-1,-9.99,-0.99 
+1973,2,1973.126,329.56,328.91,-1,-9.99,-0.99 +1973,3,1973.2027,330.3,328.96,-1,-9.99,-0.99 +1973,4,1973.2877,331.5,329.08,-1,-9.99,-0.99 +1973,5,1973.3699,332.48,329.54,-1,-9.99,-0.99 +1973,6,1973.4548,332.07,329.84,-1,-9.99,-0.99 +1973,7,1973.537,330.87,330.15,-1,-9.99,-0.99 +1973,8,1973.6219,329.31,330.63,-1,-9.99,-0.99 +1973,9,1973.7068,327.51,330.55,-1,-9.99,-0.99 +1973,10,1973.789,327.18,330.32,-1,-9.99,-0.99 +1973,11,1973.874,328.16,330.13,-1,-9.99,-0.99 +1973,12,1973.9562,328.64,329.45,-1,-9.99,-0.99 +1974,1,1974.0411,329.35,329.32,-1,-9.99,-0.99 +1974,2,1974.126,330.71,330.05,-1,-9.99,-0.99 +1974,3,1974.2027,331.48,330.14,-1,-9.99,-0.99 +1974,4,1974.2877,332.65,330.22,-1,-9.99,-0.99 +1974,5,1974.375,333.19,330.22,13,0.31,0.16 +1974,6,1974.4583,332.2,329.79,25,0.37,0.14 +1974,7,1974.5417,331.07,330.21,24,0.24,0.09 +1974,8,1974.625,329.15,330.54,26,0.31,0.12 +1974,9,1974.7083,327.33,330.44,22,0.47,0.19 +1974,10,1974.7917,327.28,330.52,24,0.22,0.09 +1974,11,1974.875,328.31,330.5,26,0.43,0.16 +1974,12,1974.9583,329.58,330.56,29,0.29,0.1 +1975,1,1975.0417,330.73,330.84,29,0.43,0.15 +1975,2,1975.125,331.46,330.85,26,0.46,0.17 +1975,3,1975.2083,331.94,330.37,17,0.33,0.15 +1975,4,1975.2917,333.11,330.53,23,0.59,0.24 +1975,5,1975.375,333.95,330.97,28,0.35,0.13 +1975,6,1975.4583,333.42,331.01,27,0.48,0.18 +1975,7,1975.5417,331.97,331.12,24,0.45,0.18 +1975,8,1975.625,329.95,331.33,24,0.47,0.18 +1975,9,1975.7083,328.5,331.6,22,0.53,0.22 +1975,10,1975.7917,328.36,331.61,11,0.21,0.12 +1975,11,1975.875,329.38,331.57,18,0.31,0.14 +1975,12,1975.9583,330.62,331.6,-1,-9.99,-0.99 +1976,1,1976.0417,331.56,331.67,19,0.23,0.1 +1976,2,1976.125,332.74,332.13,22,0.49,0.2 +1976,3,1976.2083,333.36,331.79,18,0.52,0.23 +1976,4,1976.2917,334.74,332.16,18,0.77,0.35 +1976,5,1976.375,334.72,331.75,21,0.56,0.23 +1976,6,1976.4583,333.98,331.56,15,0.21,0.1 +1976,7,1976.5417,333.08,332.22,15,0.24,0.12 +1976,8,1976.625,330.68,332.07,23,0.51,0.2 +1976,9,1976.7083,328.96,332.07,13,0.69,0.37 
+1976,10,1976.7917,328.72,331.97,19,0.57,0.25 +1976,11,1976.875,330.16,332.35,25,0.36,0.14 +1976,12,1976.9583,331.62,332.6,20,0.38,0.16 +1977,1,1977.0417,332.68,332.77,23,0.4,0.16 +1977,2,1977.125,333.17,332.57,20,0.34,0.15 +1977,3,1977.2083,334.96,333.4,23,0.51,0.21 +1977,4,1977.2917,336.14,333.54,20,0.5,0.21 +1977,5,1977.375,336.93,333.99,20,0.31,0.13 +1977,6,1977.4583,336.17,333.79,22,0.4,0.16 +1977,7,1977.5417,334.88,334,20,0.23,0.1 +1977,8,1977.625,332.56,333.9,18,0.46,0.21 +1977,9,1977.7083,331.29,334.36,19,0.46,0.2 +1977,10,1977.7917,331.28,334.5,23,0.29,0.12 +1977,11,1977.875,332.46,334.69,21,0.43,0.18 +1977,12,1977.9583,333.6,334.59,25,0.36,0.14 +1978,1,1978.0417,334.94,335.01,22,0.52,0.21 +1978,2,1978.125,335.26,334.59,25,0.5,0.19 +1978,3,1978.2083,336.66,335,28,0.59,0.21 +1978,4,1978.2917,337.69,335.07,18,0.44,0.2 +1978,5,1978.375,338.02,335.07,26,0.46,0.17 +1978,6,1978.4583,338.01,335.59,17,0.31,0.15 +1978,7,1978.5417,336.5,335.65,20,0.32,0.14 +1978,8,1978.625,334.42,335.87,19,0.32,0.14 +1978,9,1978.7083,332.36,335.51,17,0.75,0.35 +1978,10,1978.7917,332.45,335.72,21,0.34,0.14 +1978,11,1978.875,333.76,335.99,24,0.25,0.1 +1978,12,1978.9583,334.91,335.89,26,0.33,0.12 +1979,1,1979.0417,336.14,336.22,27,0.55,0.2 +1979,2,1979.125,336.69,336,25,0.3,0.11 +1979,3,1979.2083,338.27,336.56,21,0.63,0.26 +1979,4,1979.2917,338.82,336.11,24,0.67,0.26 +1979,5,1979.375,339.24,336.24,20,0.5,0.22 +1979,6,1979.4583,339.26,336.83,19,0.35,0.15 +1979,7,1979.5417,337.54,336.69,26,0.59,0.22 +1979,8,1979.625,335.72,337.2,24,0.6,0.23 +1979,9,1979.7083,333.98,337.19,19,0.65,0.29 +1979,10,1979.7917,334.24,337.57,25,0.42,0.16 +1979,11,1979.875,335.32,337.59,27,0.3,0.11 +1979,12,1979.9583,336.81,337.83,22,0.23,0.09 +1980,1,1980.0417,337.9,338.13,29,0.57,0.2 +1980,2,1980.125,338.34,337.85,26,0.49,0.18 +1980,3,1980.2083,340.07,338.51,23,0.54,0.22 +1980,4,1980.2917,340.93,338.31,24,0.29,0.11 +1980,5,1980.375,341.45,338.4,24,0.54,0.21 +1980,6,1980.4583,341.36,338.85,20,0.39,0.17 
+1980,7,1980.5417,339.45,338.56,26,0.6,0.22 +1980,8,1980.625,337.67,339.07,16,1.05,0.5 +1980,9,1980.7083,336.25,339.37,15,0.69,0.34 +1980,10,1980.7917,336.14,339.4,26,0.26,0.1 +1980,11,1980.875,337.3,339.46,27,0.26,0.1 +1980,12,1980.9583,338.29,339.26,24,0.25,0.1 +1981,1,1981.0417,339.29,339.42,28,0.39,0.14 +1981,2,1981.125,340.55,339.97,25,0.65,0.25 +1981,3,1981.2083,341.63,340.09,25,0.48,0.19 +1981,4,1981.2917,342.6,340,26,0.46,0.17 +1981,5,1981.375,343.04,339.98,30,0.19,0.07 +1981,6,1981.4583,342.54,340.05,25,0.29,0.11 +1981,7,1981.5417,340.82,339.92,24,0.46,0.18 +1981,8,1981.625,338.48,339.87,25,0.48,0.18 +1981,9,1981.7083,336.95,340.16,27,0.55,0.2 +1981,10,1981.7917,337.05,340.39,25,0.39,0.15 +1981,11,1981.875,338.58,340.74,26,0.31,0.12 +1981,12,1981.9583,339.91,340.85,20,0.28,0.12 +1982,1,1982.0417,340.93,341.09,28,0.3,0.11 +1982,2,1982.125,341.76,341.15,24,0.49,0.19 +1982,3,1982.2083,342.78,341.18,17,0.41,0.19 +1982,4,1982.2917,343.96,341.34,7,0.42,0.31 +1982,5,1982.375,344.77,341.68,27,0.37,0.14 +1982,6,1982.4583,343.88,341.42,27,0.37,0.14 +1982,7,1982.5417,342.42,341.61,28,0.35,0.13 +1982,8,1982.625,340.24,341.64,25,0.61,0.23 +1982,9,1982.7083,338.38,341.56,21,0.59,0.25 +1982,10,1982.7917,338.41,341.77,26,0.5,0.19 +1982,11,1982.875,339.44,341.58,24,0.39,0.15 +1982,12,1982.9583,340.78,341.7,26,0.3,0.11 +1983,1,1983.0417,341.57,341.75,28,0.47,0.17 +1983,2,1983.125,342.79,342.24,24,0.37,0.15 +1983,3,1983.2083,343.37,341.86,27,0.88,0.32 +1983,4,1983.2917,345.4,342.78,23,0.29,0.12 +1983,5,1983.375,346.14,342.97,28,0.51,0.19 +1983,6,1983.4583,345.76,343.29,20,0.3,0.13 +1983,7,1983.5417,344.32,343.56,22,0.57,0.23 +1983,8,1983.625,342.51,343.89,16,0.73,0.35 +1983,9,1983.7083,340.46,343.59,15,0.5,0.25 +1983,10,1983.7917,340.53,343.86,20,0.31,0.13 +1983,11,1983.875,341.79,343.92,27,0.33,0.12 +1983,12,1983.9583,343.2,344.12,21,0.25,0.1 +1984,1,1984.0417,344.21,344.32,23,0.4,0.16 +1984,2,1984.125,344.92,344.38,23,0.32,0.13 +1984,3,1984.2083,345.68,344.26,19,0.3,0.13 
+1984,4,1984.2917,347.14,344.54,2,-9.99,-0.99 +1984,5,1984.375,347.78,344.59,20,0.42,0.18 +1984,6,1984.4583,347.16,344.72,20,0.31,0.13 +1984,7,1984.5417,345.79,345.02,18,0.33,0.15 +1984,8,1984.625,343.74,345.11,12,0.45,0.25 +1984,9,1984.7083,341.59,344.75,14,0.72,0.37 +1984,10,1984.7917,341.86,345.19,12,0.36,0.2 +1984,11,1984.875,343.31,345.4,18,0.41,0.19 +1984,12,1984.9583,345,345.88,14,0.53,0.27 +1985,1,1985.0417,345.48,345.59,25,0.38,0.14 +1985,2,1985.125,346.41,345.91,15,0.37,0.18 +1985,3,1985.2083,347.91,346.57,17,0.34,0.16 +1985,4,1985.2917,348.66,346.1,21,0.61,0.25 +1985,5,1985.375,349.28,346.13,20,0.51,0.22 +1985,6,1985.4583,348.65,346.22,21,0.34,0.14 +1985,7,1985.5417,346.91,346.08,17,0.36,0.17 +1985,8,1985.625,345.26,346.57,16,0.57,0.27 +1985,9,1985.7083,343.47,346.58,24,0.57,0.22 +1985,10,1985.7917,343.35,346.6,20,0.29,0.13 +1985,11,1985.875,344.73,346.82,21,0.4,0.17 +1985,12,1985.9583,346.12,347.04,26,0.62,0.23 +1986,1,1986.0417,346.78,346.82,25,0.31,0.12 +1986,2,1986.125,347.48,346.97,25,0.45,0.17 +1986,3,1986.2083,348.25,346.94,16,0.7,0.34 +1986,4,1986.2917,349.86,347.32,19,0.38,0.17 +1986,5,1986.375,350.52,347.42,18,0.31,0.14 +1986,6,1986.4583,349.98,347.6,17,0.25,0.11 +1986,7,1986.5417,348.25,347.43,20,0.47,0.2 +1986,8,1986.625,346.17,347.51,18,0.48,0.21 +1986,9,1986.7083,345.48,348.61,17,0.63,0.29 +1986,10,1986.7917,344.82,348.04,25,0.32,0.12 +1986,11,1986.875,346.22,348.28,21,0.3,0.13 +1986,12,1986.9583,347.48,348.36,24,0.35,0.14 +1987,1,1987.0417,348.73,348.66,25,0.46,0.17 +1987,2,1987.125,348.92,348.23,25,0.58,0.22 +1987,3,1987.2083,349.81,348.39,21,0.35,0.15 +1987,4,1987.2917,351.4,348.86,26,0.68,0.25 +1987,5,1987.375,352.15,349.09,28,0.37,0.13 +1987,6,1987.4583,351.58,349.28,22,0.21,0.09 +1987,7,1987.5417,350.21,349.51,17,0.73,0.34 +1987,8,1987.625,348.2,349.65,15,0.85,0.42 +1987,9,1987.7083,346.66,349.85,23,0.61,0.24 +1987,10,1987.7917,346.72,349.96,22,0.41,0.17 +1987,11,1987.875,348.08,350.14,23,0.33,0.13 
+1987,12,1987.9583,349.28,350.14,27,0.2,0.08 +1988,1,1988.0417,350.51,350.49,24,0.21,0.08 +1988,2,1988.125,351.7,350.99,23,0.57,0.23 +1988,3,1988.2083,352.5,350.99,25,0.78,0.3 +1988,4,1988.2917,353.67,351.03,27,0.48,0.18 +1988,5,1988.375,354.35,351.22,28,0.37,0.13 +1988,6,1988.4583,353.88,351.55,26,0.3,0.11 +1988,7,1988.5417,352.8,352.15,27,0.49,0.18 +1988,8,1988.625,350.49,352.01,26,0.62,0.23 +1988,9,1988.7083,348.97,352.18,26,0.47,0.18 +1988,10,1988.7917,349.37,352.62,26,0.31,0.12 +1988,11,1988.875,350.42,352.53,25,0.2,0.08 +1988,12,1988.9583,351.62,352.52,28,0.36,0.13 +1989,1,1989.0417,353.07,352.99,28,0.45,0.16 +1989,2,1989.125,353.43,352.69,25,0.38,0.15 +1989,3,1989.2083,354.08,352.6,29,0.53,0.19 +1989,4,1989.2917,355.72,353.07,28,0.47,0.17 +1989,5,1989.375,355.95,352.78,27,0.49,0.18 +1989,6,1989.4583,355.44,353.06,26,0.42,0.16 +1989,7,1989.5417,354.05,353.38,26,0.41,0.15 +1989,8,1989.625,351.84,353.43,25,0.48,0.18 +1989,9,1989.7083,350.09,353.37,24,0.69,0.27 +1989,10,1989.7917,350.33,353.57,25,0.34,0.13 +1989,11,1989.875,351.55,353.68,27,0.36,0.13 +1989,12,1989.9583,352.91,353.84,27,0.48,0.18 +1990,1,1990.0417,353.86,353.78,25,0.34,0.13 +1990,2,1990.125,355.1,354.37,28,0.66,0.24 +1990,3,1990.2083,355.75,354.27,27,0.57,0.21 +1990,4,1990.2917,356.38,353.76,28,0.55,0.2 +1990,5,1990.375,357.38,354.23,28,0.3,0.11 +1990,6,1990.4583,356.39,354.02,29,0.4,0.14 +1990,7,1990.5417,354.89,354.24,30,0.89,0.31 +1990,8,1990.625,353.06,354.68,22,0.62,0.25 +1990,9,1990.7083,351.38,354.69,27,0.72,0.26 +1990,10,1990.7917,351.69,354.94,28,0.3,0.11 +1990,11,1990.875,353.14,355.18,24,0.2,0.08 +1990,12,1990.9583,354.41,355.26,28,0.51,0.19 +1991,1,1991.0417,354.93,354.9,28,0.51,0.18 +1991,2,1991.125,355.82,355.11,26,0.54,0.2 +1991,3,1991.2083,357.33,355.79,30,0.73,0.25 +1991,4,1991.2917,358.77,356.13,30,0.66,0.23 +1991,5,1991.375,359.23,356.1,29,0.52,0.19 +1991,6,1991.4583,358.23,355.88,29,0.3,0.11 +1991,7,1991.5417,356.3,355.69,24,0.46,0.18 +1991,8,1991.625,353.97,355.6,23,0.39,0.15 
+1991,9,1991.7083,352.34,355.66,27,0.37,0.14 +1991,10,1991.7917,352.43,355.69,27,0.25,0.09 +1991,11,1991.875,353.89,355.87,28,0.25,0.09 +1991,12,1991.9583,355.21,356.02,30,0.34,0.12 +1992,1,1992.0417,356.34,356.29,31,0.6,0.21 +1992,2,1992.125,357.21,356.47,27,0.56,0.21 +1992,3,1992.2083,357.97,356.38,24,0.72,0.28 +1992,4,1992.2917,359.22,356.51,27,0.53,0.2 +1992,5,1992.375,359.71,356.52,26,0.74,0.28 +1992,6,1992.4583,359.43,357.07,30,0.49,0.17 +1992,7,1992.5417,357.15,356.58,25,0.63,0.24 +1992,8,1992.625,354.99,356.67,24,0.62,0.24 +1992,9,1992.7083,353.01,356.36,25,0.98,0.38 +1992,10,1992.7917,353.41,356.72,29,0.56,0.2 +1992,11,1992.875,354.42,356.48,29,0.34,0.12 +1992,12,1992.9583,355.68,356.5,31,0.32,0.11 +1993,1,1993.0417,357.1,357.06,28,0.58,0.21 +1993,2,1993.125,357.42,356.54,28,0.49,0.18 +1993,3,1993.2083,358.59,356.88,30,0.72,0.25 +1993,4,1993.2917,359.39,356.71,25,0.53,0.2 +1993,5,1993.375,360.3,357.14,30,0.45,0.16 +1993,6,1993.4583,359.64,357.24,28,0.35,0.13 +1993,7,1993.5417,357.46,356.87,25,0.78,0.3 +1993,8,1993.625,355.76,357.44,27,0.62,0.23 +1993,9,1993.7083,354.14,357.51,23,0.73,0.29 +1993,10,1993.7917,354.23,357.61,28,0.29,0.11 +1993,11,1993.875,355.53,357.65,29,0.26,0.09 +1993,12,1993.9583,357.03,357.92,29,0.28,0.1 +1994,1,1994.0417,358.36,358.25,27,0.33,0.12 +1994,2,1994.125,359.04,358.21,25,0.5,0.19 +1994,3,1994.2083,360.11,358.41,29,0.82,0.29 +1994,4,1994.2917,361.36,358.59,28,0.5,0.18 +1994,5,1994.375,361.78,358.59,30,0.45,0.16 +1994,6,1994.4583,360.94,358.57,27,0.3,0.11 +1994,7,1994.5417,359.51,358.91,31,0.41,0.14 +1994,8,1994.625,357.59,359.29,24,0.43,0.17 +1994,9,1994.7083,355.86,359.3,24,0.58,0.23 +1994,10,1994.7917,356.21,359.63,28,0.28,0.1 +1994,11,1994.875,357.65,359.8,28,0.51,0.18 +1994,12,1994.9583,359.1,359.96,28,0.46,0.17 +1995,1,1995.0417,360.04,359.91,30,0.47,0.16 +1995,2,1995.125,361,360.18,28,0.52,0.19 +1995,3,1995.2083,361.98,360.37,29,0.78,0.28 +1995,4,1995.2917,363.44,360.76,29,0.65,0.23 
+1995,5,1995.375,363.83,360.73,29,0.66,0.24 +1995,6,1995.4583,363.33,360.98,27,0.37,0.14 +1995,7,1995.5417,361.78,361.1,28,0.36,0.13 +1995,8,1995.625,359.33,360.93,24,0.7,0.28 +1995,9,1995.7083,358.32,361.71,24,0.68,0.26 +1995,10,1995.7917,358.14,361.52,29,0.26,0.09 +1995,11,1995.875,359.61,361.75,26,0.24,0.09 +1995,12,1995.9583,360.82,361.67,30,0.36,0.12 +1996,1,1996.0417,362.2,361.98,29,0.38,0.13 +1996,2,1996.125,363.36,362.47,28,0.55,0.2 +1996,3,1996.2083,364.28,362.64,28,0.67,0.24 +1996,4,1996.2917,364.69,361.99,29,0.59,0.21 +1996,5,1996.375,365.25,362.23,30,0.57,0.2 +1996,6,1996.4583,365.06,362.82,30,0.38,0.13 +1996,7,1996.5417,363.69,362.98,31,0.32,0.11 +1996,8,1996.625,361.55,363.13,27,0.49,0.18 +1996,9,1996.7083,359.69,363.14,25,0.75,0.29 +1996,10,1996.7917,359.72,363.12,29,0.32,0.11 +1996,11,1996.875,361.04,363.18,29,0.29,0.1 +1996,12,1996.9583,362.39,363.23,29,0.36,0.13 +1997,1,1997.0417,363.24,363.03,31,0.4,0.14 +1997,2,1997.125,364.21,363.4,28,0.62,0.22 +1997,3,1997.2083,364.65,363.02,31,0.4,0.14 +1997,4,1997.2917,366.48,363.82,21,0.46,0.19 +1997,5,1997.375,366.77,363.87,29,0.53,0.19 +1997,6,1997.4583,365.73,363.56,27,0.23,0.09 +1997,7,1997.5417,364.46,363.74,24,0.47,0.18 +1997,8,1997.625,362.4,363.98,25,0.57,0.22 +1997,9,1997.7083,360.44,363.83,26,0.63,0.24 +1997,10,1997.7917,360.98,364.28,27,0.32,0.12 +1997,11,1997.875,362.65,364.71,30,0.31,0.11 +1997,12,1997.9583,364.51,365.28,30,0.41,0.14 +1998,1,1998.0417,365.39,365.19,30,0.43,0.15 +1998,2,1998.125,366.1,365.29,28,0.62,0.23 +1998,3,1998.2083,367.36,365.73,31,0.82,0.28 +1998,4,1998.2917,368.79,366.17,29,0.63,0.22 +1998,5,1998.375,369.56,366.68,30,0.77,0.27 +1998,6,1998.4583,369.13,366.95,28,0.24,0.09 +1998,7,1998.5417,367.98,367.29,23,0.65,0.26 +1998,8,1998.625,366.1,367.69,30,0.3,0.1 +1998,9,1998.7083,364.16,367.51,28,0.4,0.14 +1998,10,1998.7917,364.54,367.82,30,0.26,0.09 +1998,11,1998.875,365.67,367.7,23,0.25,0.1 +1998,12,1998.9583,367.3,368.05,26,0.36,0.14 
+1999,1,1999.0417,368.35,368.13,27,0.47,0.17 +1999,2,1999.125,369.28,368.46,21,0.47,0.2 +1999,3,1999.2083,369.84,368.24,25,0.81,0.31 +1999,4,1999.2917,371.15,368.62,29,0.67,0.24 +1999,5,1999.375,371.12,368.31,26,0.59,0.22 +1999,6,1999.4583,370.46,368.3,26,0.44,0.16 +1999,7,1999.5417,369.61,368.93,27,0.63,0.23 +1999,8,1999.625,367.06,368.63,25,0.38,0.14 +1999,9,1999.7083,364.95,368.28,28,0.74,0.27 +1999,10,1999.7917,365.52,368.8,31,0.28,0.1 +1999,11,1999.875,366.88,368.86,28,0.25,0.09 +1999,12,1999.9583,368.26,368.93,26,0.29,0.11 +2000,1,2000.0417,369.45,369.24,26,0.47,0.18 +2000,2,2000.125,369.71,368.99,19,0.48,0.21 +2000,3,2000.2083,370.75,369.24,30,0.47,0.16 +2000,4,2000.2917,371.98,369.44,27,0.58,0.21 +2000,5,2000.375,371.75,368.87,28,0.53,0.19 +2000,6,2000.4583,371.87,369.66,28,0.24,0.09 +2000,7,2000.5417,370.02,369.36,25,0.31,0.12 +2000,8,2000.625,368.27,369.87,27,0.42,0.15 +2000,9,2000.7083,367.15,370.46,25,0.36,0.14 +2000,10,2000.7917,367.18,370.42,30,0.27,0.09 +2000,11,2000.875,368.53,370.48,25,0.3,0.12 +2000,12,2000.9583,369.83,370.46,30,0.38,0.13 +2001,1,2001.0417,370.76,370.6,30,0.56,0.2 +2001,2,2001.125,371.69,370.95,26,0.61,0.23 +2001,3,2001.2083,372.63,371.06,26,0.46,0.17 +2001,4,2001.2917,373.55,370.99,29,0.56,0.2 +2001,5,2001.375,374.03,371.11,24,0.41,0.16 +2001,6,2001.4583,373.4,371.17,26,0.37,0.14 +2001,7,2001.5417,371.68,371.08,25,0.62,0.24 +2001,8,2001.625,369.78,371.39,27,0.6,0.22 +2001,9,2001.7083,368.34,371.61,28,0.49,0.18 +2001,10,2001.7917,368.61,371.85,31,0.33,0.11 +2001,11,2001.875,369.94,371.92,24,0.24,0.09 +2001,12,2001.9583,371.42,372.09,29,0.4,0.14 +2002,1,2002.0417,372.7,372.48,28,0.52,0.19 +2002,2,2002.125,373.37,372.49,28,0.66,0.24 +2002,3,2002.2083,374.3,372.61,24,0.62,0.24 +2002,4,2002.2917,375.19,372.54,29,0.55,0.19 +2002,5,2002.375,375.93,372.98,29,0.57,0.2 +2002,6,2002.4583,375.69,373.46,28,0.46,0.17 +2002,7,2002.5417,374.16,373.58,25,0.47,0.18 +2002,8,2002.625,372.03,373.7,28,0.65,0.24 
+2002,9,2002.7083,370.93,374.29,23,0.74,0.3 +2002,10,2002.7917,370.73,374.06,31,0.62,0.21 +2002,11,2002.875,372.43,374.52,29,0.43,0.15 +2002,12,2002.9583,373.98,374.72,31,0.46,0.16 +2003,1,2003.0417,375.07,374.82,30,0.51,0.18 +2003,2,2003.125,375.82,374.95,27,0.58,0.21 +2003,3,2003.2083,376.64,374.99,28,0.63,0.23 +2003,4,2003.2917,377.92,375.24,27,0.37,0.14 +2003,5,2003.375,378.78,375.73,30,0.78,0.27 +2003,6,2003.4583,378.46,376.21,25,0.39,0.15 +2003,7,2003.5417,376.88,376.37,29,0.7,0.25 +2003,8,2003.625,374.57,376.27,23,0.57,0.23 +2003,9,2003.7083,373.34,376.65,25,0.37,0.14 +2003,10,2003.7917,373.31,376.65,30,0.33,0.12 +2003,11,2003.875,374.84,376.99,26,0.45,0.17 +2003,12,2003.9583,376.17,376.93,27,0.4,0.15 +2004,1,2004.0417,377.17,376.96,30,0.45,0.16 +2004,2,2004.125,378.05,377.19,29,0.74,0.26 +2004,3,2004.2083,379.06,377.4,27,0.84,0.31 +2004,4,2004.2917,380.54,377.8,26,0.52,0.19 +2004,5,2004.375,380.8,377.66,28,0.61,0.22 +2004,6,2004.4583,379.87,377.57,21,0.47,0.19 +2004,7,2004.5417,377.65,377.12,25,0.5,0.19 +2004,8,2004.625,376.17,377.9,16,0.45,0.21 +2004,9,2004.7083,374.43,377.8,15,0.56,0.28 +2004,10,2004.7917,374.63,378,29,0.19,0.07 +2004,11,2004.875,376.33,378.49,29,0.62,0.22 +2004,12,2004.9583,377.68,378.48,30,0.29,0.1 +2005,1,2005.0417,378.63,378.37,31,0.32,0.11 +2005,2,2005.125,379.91,379.1,24,0.6,0.24 +2005,3,2005.2083,380.95,379.45,26,1.16,0.44 +2005,4,2005.2917,382.48,379.84,26,0.53,0.2 +2005,5,2005.375,382.64,379.49,31,0.61,0.21 +2005,6,2005.4583,382.4,380.07,28,0.21,0.08 +2005,7,2005.5417,380.93,380.38,29,0.38,0.13 +2005,8,2005.625,378.93,380.61,26,0.53,0.2 +2005,9,2005.7083,376.89,380.2,27,0.51,0.19 +2005,10,2005.7917,377.19,380.5,14,0.15,0.08 +2005,11,2005.875,378.54,380.69,23,0.45,0.18 +2005,12,2005.9583,380.31,381.09,26,0.39,0.15 +2006,1,2006.0417,381.58,381.33,24,0.31,0.12 +2006,2,2006.125,382.4,381.58,25,0.51,0.2 +2006,3,2006.2083,382.86,381.32,29,0.55,0.2 +2006,4,2006.2917,384.8,382.11,25,0.49,0.19 +2006,5,2006.375,385.22,382.06,24,0.45,0.17 
+2006,6,2006.4583,384.24,381.93,28,0.43,0.16 +2006,7,2006.5417,382.65,382.1,24,0.32,0.12 +2006,8,2006.625,380.6,382.27,27,0.47,0.17 +2006,9,2006.7083,379.04,382.35,25,0.42,0.16 +2006,10,2006.7917,379.33,382.66,23,0.4,0.16 +2006,11,2006.875,380.35,382.52,29,0.39,0.14 +2006,12,2006.9583,382.02,382.84,27,0.38,0.14 +2007,1,2007.0417,383.1,382.88,24,0.76,0.3 +2007,2,2007.125,384.12,383.22,21,0.81,0.34 +2007,3,2007.2083,384.81,383.17,27,0.63,0.23 +2007,4,2007.2917,386.73,383.95,25,0.76,0.29 +2007,5,2007.375,386.78,383.56,29,0.64,0.23 +2007,6,2007.4583,386.33,384.06,26,0.42,0.16 +2007,7,2007.5417,384.73,384.25,27,0.44,0.16 +2007,8,2007.625,382.24,383.95,22,0.64,0.26 +2007,9,2007.7083,381.2,384.56,21,0.45,0.19 +2007,10,2007.7917,381.37,384.72,29,0.19,0.07 +2007,11,2007.875,382.7,384.9,30,0.31,0.11 +2007,12,2007.9583,384.19,385.07,22,0.34,0.14 +2008,1,2008.0417,385.78,385.54,31,0.56,0.19 +2008,2,2008.125,386.06,385.2,26,0.58,0.22 +2008,3,2008.2083,386.28,384.72,30,0.6,0.21 +2008,4,2008.2917,387.34,384.71,22,1.19,0.49 +2008,5,2008.375,388.78,385.69,25,0.57,0.22 +2008,6,2008.4583,387.99,385.68,23,0.49,0.2 +2008,7,2008.5417,386.6,386.04,10,0.96,0.58 +2008,8,2008.625,384.32,385.98,25,0.66,0.25 +2008,9,2008.7083,383.41,386.68,27,0.34,0.12 +2008,10,2008.7917,383.21,386.49,23,0.27,0.11 +2008,11,2008.875,384.41,386.59,28,0.29,0.11 +2008,12,2008.9583,385.79,386.64,29,0.27,0.1 +2009,1,2009.0417,387.17,386.86,30,0.38,0.13 +2009,2,2009.125,387.7,386.81,26,0.49,0.18 +2009,3,2009.2083,389.04,387.54,28,0.68,0.25 +2009,4,2009.2917,389.76,387.15,29,0.85,0.3 +2009,5,2009.375,390.36,387.24,30,0.51,0.18 +2009,6,2009.4583,389.7,387.46,29,0.6,0.21 +2009,7,2009.5417,388.25,387.77,22,0.31,0.13 +2009,8,2009.625,386.29,387.99,28,0.62,0.22 +2009,9,2009.7083,384.95,388.22,28,0.56,0.2 +2009,10,2009.7917,384.64,387.88,30,0.31,0.11 +2009,11,2009.875,386.23,388.36,30,0.29,0.1 +2009,12,2009.9583,387.63,388.43,20,0.47,0.2 +2010,1,2010.0417,388.91,388.62,30,0.92,0.32 
+2010,2,2010.125,390.41,389.47,20,1.31,0.56 +2010,3,2010.2083,391.37,389.85,25,1.05,0.4 +2010,4,2010.2917,392.67,390.12,26,0.65,0.24 +2010,5,2010.375,393.21,390.09,29,0.65,0.23 +2010,6,2010.4583,392.38,390.1,28,0.42,0.15 +2010,7,2010.5417,390.41,389.94,29,0.47,0.17 +2010,8,2010.625,388.54,390.21,26,0.41,0.16 +2010,9,2010.7083,387.03,390.32,29,0.55,0.19 +2010,10,2010.7917,387.43,390.72,31,0.27,0.09 +2010,11,2010.875,388.87,390.99,29,0.42,0.15 +2010,12,2010.9583,389.99,390.8,29,0.47,0.17 +2011,1,2011.0417,391.5,391.2,29,0.88,0.31 +2011,2,2011.125,392.05,391.12,28,0.47,0.17 +2011,3,2011.2083,392.8,391.28,29,0.97,0.35 +2011,4,2011.2917,393.44,390.84,28,0.73,0.26 +2011,5,2011.375,394.41,391.24,29,0.93,0.33 +2011,6,2011.4583,393.95,391.65,28,0.45,0.16 +2011,7,2011.5417,392.72,392.24,26,0.71,0.26 +2011,8,2011.625,390.33,392.03,27,0.42,0.15 +2011,9,2011.7083,389.28,392.6,26,0.31,0.12 +2011,10,2011.7917,389.19,392.52,30,0.17,0.06 +2011,11,2011.875,390.48,392.63,28,0.26,0.1 +2011,12,2011.9583,392.06,392.86,26,0.37,0.14 +2012,1,2012.0417,393.31,393.08,30,0.77,0.27 +2012,2,2012.125,394.04,393.21,26,1.19,0.45 +2012,3,2012.2083,394.59,393,30,0.63,0.22 +2012,4,2012.2917,396.38,393.65,29,0.59,0.21 +2012,5,2012.375,396.93,393.73,30,0.5,0.17 +2012,6,2012.4583,395.91,393.64,28,0.59,0.21 +2012,7,2012.5417,394.56,394.12,26,0.3,0.11 +2012,8,2012.625,392.59,394.36,30,0.52,0.18 +2012,9,2012.7083,391.32,394.74,26,0.42,0.16 +2012,10,2012.7917,391.27,394.63,28,0.23,0.08 +2012,11,2012.875,393.2,395.24,29,0.53,0.19 +2012,12,2012.9583,394.57,395.27,29,0.44,0.16 +2013,1,2013.0417,395.78,395.63,28,0.6,0.22 +2013,2,2013.125,397.03,396.24,25,0.57,0.22 +2013,3,2013.2083,397.66,396.08,30,0.71,0.25 +2013,4,2013.2917,398.64,395.8,22,0.59,0.24 +2013,5,2013.375,400.02,396.65,28,0.37,0.13 +2013,6,2013.4583,398.81,396.48,26,0.43,0.16 +2013,7,2013.5417,397.51,397.12,21,0.52,0.22 +2013,8,2013.625,395.39,397.26,27,0.45,0.16 +2013,9,2013.7083,393.72,397.23,26,0.35,0.13 
+2013,10,2013.7917,393.9,397.24,28,0.16,0.06 +2013,11,2013.875,395.36,397.34,30,0.6,0.21 +2013,12,2013.9583,397.03,397.78,30,0.48,0.17 +2014,1,2014.0417,398.04,397.74,31,0.49,0.17 +2014,2,2014.125,398.27,397.46,27,0.51,0.19 +2014,3,2014.2083,399.91,398.38,22,0.84,0.34 +2014,4,2014.2917,401.51,398.64,26,0.5,0.19 +2014,5,2014.375,401.96,398.57,22,0.51,0.21 +2014,6,2014.4583,401.43,399.11,28,0.36,0.13 +2014,7,2014.5417,399.38,398.95,25,0.56,0.21 +2014,8,2014.625,397.32,399.2,21,0.22,0.09 +2014,9,2014.7083,395.64,399.2,21,0.56,0.24 +2014,10,2014.7917,396.29,399.69,24,0.75,0.29 +2014,11,2014.875,397.55,399.63,27,0.38,0.14 +2014,12,2014.9583,399.15,399.88,29,0.61,0.22 +2015,1,2015.0417,400.18,399.92,30,0.55,0.19 +2015,2,2015.125,400.55,399.78,28,0.63,0.23 +2015,3,2015.2083,401.74,400.23,24,1.02,0.4 +2015,4,2015.2917,403.34,400.47,26,0.86,0.32 +2015,5,2015.375,404.15,400.71,30,0.32,0.11 +2015,6,2015.4583,402.97,400.66,29,0.47,0.17 +2015,7,2015.5417,401.46,401.1,24,0.57,0.22 +2015,8,2015.625,399.11,401.03,28,0.74,0.27 +2015,9,2015.7083,397.82,401.43,25,0.32,0.12 +2015,10,2015.7917,398.49,401.88,28,0.56,0.2 +2015,11,2015.875,400.27,402.22,25,0.58,0.22 +2015,12,2015.9583,402.06,402.72,30,0.67,0.23 +2016,1,2016.0417,402.73,402.46,27,0.56,0.21 +2016,2,2016.125,404.25,403.41,25,1.11,0.43 +2016,3,2016.2083,405.06,403.55,28,0.81,0.29 +2016,4,2016.2917,407.6,404.78,23,1.04,0.41 +2016,5,2016.375,407.9,404.42,29,0.5,0.18 +2016,6,2016.4583,406.99,404.59,26,0.6,0.23 +2016,7,2016.5417,404.59,404.23,28,0.88,0.32 +2016,8,2016.625,402.45,404.39,24,0.6,0.23 +2016,9,2016.7083,401.23,404.84,25,0.44,0.17 +2016,10,2016.7917,401.79,405.22,29,0.3,0.11 +2016,11,2016.875,403.72,405.73,27,0.72,0.26 +2016,12,2016.9583,404.64,405.33,29,0.44,0.16 +2017,1,2017.0417,406.36,406.05,27,0.68,0.25 +2017,2,2017.125,406.66,405.82,26,0.71,0.27 +2017,3,2017.2083,407.54,406.06,24,1.03,0.4 +2017,4,2017.2917,409.22,406.38,26,0.86,0.32 +2017,5,2017.375,409.89,406.38,27,0.57,0.21 
+2017,6,2017.4583,409.08,406.69,26,0.54,0.2 +2017,7,2017.5417,407.33,407,28,0.61,0.22 +2017,8,2017.625,405.32,407.29,29,0.32,0.12 +2017,9,2017.7083,403.57,407.16,26,0.37,0.14 +2017,10,2017.7917,403.82,407.21,27,0.3,0.11 +2017,11,2017.875,405.31,407.34,26,0.41,0.15 +2017,12,2017.9583,407,407.71,31,0.57,0.2 +2018,1,2018.0417,408.15,407.89,29,0.55,0.19 +2018,2,2018.125,408.52,407.65,28,0.52,0.19 +2018,3,2018.2083,409.59,408.09,29,0.65,0.23 +2018,4,2018.2917,410.45,407.65,21,0.9,0.38 +2018,5,2018.375,411.44,407.94,24,0.86,0.33 +2018,6,2018.4583,410.99,408.59,29,0.61,0.22 +2018,7,2018.5417,408.9,408.55,27,0.46,0.17 +2018,8,2018.625,407.16,409.07,31,0.28,0.1 +2018,9,2018.7083,405.71,409.28,29,0.45,0.16 +2018,10,2018.7917,406.19,409.61,30,0.32,0.11 +2018,11,2018.875,408.21,410.24,24,0.56,0.22 +2018,12,2018.9583,409.27,410.01,30,0.5,0.17 +2019,1,2019.0417,411.03,410.78,26,1.26,0.47 +2019,2,2019.125,411.96,411.09,27,1.14,0.42 +2019,3,2019.2083,412.18,410.68,28,1.12,0.4 +2019,4,2019.2917,413.54,410.74,27,0.6,0.22 +2019,5,2019.375,414.86,411.37,28,0.5,0.18 +2019,6,2019.4583,414.16,411.76,27,0.36,0.13 +2019,7,2019.5417,411.97,411.62,25,0.82,0.31 +2019,8,2019.625,410.18,412.09,29,0.33,0.12 +2019,9,2019.7083,408.79,412.36,29,0.35,0.13 +2019,10,2019.7917,408.75,412.17,29,0.31,0.11 +2019,11,2019.875,410.48,412.5,26,0.4,0.15 +2019,12,2019.9583,411.98,412.72,31,0.4,0.14 +2020,1,2020.0417,413.61,413.35,29,0.73,0.26 +2020,2,2020.125,414.34,413.47,28,0.69,0.25 +2020,3,2020.2083,414.74,413.24,26,0.33,0.12 +2020,4,2020.2917,416.45,413.65,28,0.65,0.24 +2020,5,2020.375,417.31,413.81,27,0.61,0.23 +2020,6,2020.4583,416.62,414.22,27,0.45,0.16 +2020,7,2020.5417,414.61,414.26,30,0.57,0.2 +2020,8,2020.625,412.78,414.69,25,0.25,0.1 +2020,9,2020.7083,411.52,415.1,29,0.31,0.11 +2020,10,2020.7917,411.51,414.92,30,0.22,0.08 +2020,11,2020.875,413.11,415.14,27,0.8,0.29 +2020,12,2020.9583,414.25,415,30,0.48,0.17 +2021,1,2021.0417,415.52,415.26,29,0.44,0.16 +2021,2,2021.125,416.75,415.88,28,1.01,0.36 
diff --git a/pandas/tests/io/data/rdata/ppm_df.rda b/pandas/tests/io/data/rdata/ppm_df.rda new file mode 100644 index 0000000000000..b900815050a55 Binary files /dev/null and b/pandas/tests/io/data/rdata/ppm_df.rda differ diff --git a/pandas/tests/io/data/rdata/ppm_df.rds b/pandas/tests/io/data/rdata/ppm_df.rds new file mode 100644 index 0000000000000..242a3e2b11236 Binary files /dev/null and b/pandas/tests/io/data/rdata/ppm_df.rds differ diff --git a/pandas/tests/io/data/rdata/ppm_ts.rds b/pandas/tests/io/data/rdata/ppm_ts.rds new file mode 100644 index 0000000000000..3f49b7d24f6b0 Binary files /dev/null and b/pandas/tests/io/data/rdata/ppm_ts.rds differ diff --git a/pandas/tests/io/data/rdata/sea_ice_df.rds b/pandas/tests/io/data/rdata/sea_ice_df.rds new file mode 100644 index 0000000000000..23229ca9a87db Binary files /dev/null and b/pandas/tests/io/data/rdata/sea_ice_df.rds differ diff --git a/pandas/tests/io/data/rdata/species_mtx.rds b/pandas/tests/io/data/rdata/species_mtx.rds new file mode 100644 index 0000000000000..aa9ebe379e50a Binary files /dev/null and b/pandas/tests/io/data/rdata/species_mtx.rds differ diff --git a/pandas/tests/io/test_rdata.py b/pandas/tests/io/test_rdata.py new file mode 100644 index 0000000000000..16674f32d3d7e --- /dev/null +++ b/pandas/tests/io/test_rdata.py @@ -0,0 +1,872 @@ +import gzip +from io import BytesIO +import os +import pickle +import shutil +from urllib.error import HTTPError + +import numpy as np +import pytest + +from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime +from pandas.compat import ( + IS64, + PY38, +) +import pandas.util._test_decorators as td + +from pandas import ( + Categorical, + DataFrame, + Timestamp, + array, + interval_range, + period_range, + to_datetime, +) +import pandas._testing as tm +from pandas.arrays import SparseArray + +from pandas.io.rdata._rdata import ( + LibrdataReader, + LibrdataReaderError, + LibrdataWriter, + LibrdataWriterError, +) +from pandas.io.rdata.rdata_reader 
import read_rdata + +ghg_df = DataFrame( + { + "gas": { + 141: "Carbon dioxide", + 142: "Methane", + 143: "Nitrous oxide", + 144: "Fluorinated gases", + 145: "Total", + }, + "year": {141: 2018, 142: 2018, 143: 2018, 144: 2018, 145: 2018}, + "emissions": { + 141: 5424.881502132882, + 142: 634.4571270782675, + 143: 434.52855537666636, + 144: 182.78243246177678, + 145: 6676.649617049592, + }, + } +).rename_axis("rownames") + +plants_df = DataFrame( + { + "plant_group": { + 16: "Pteridophytes", + 17: "Pteridophytes", + 18: "Pteridophytes", + 19: "Pteridophytes", + 20: "Pteridophytes", + }, + "status": { + 16: "Data Deficient", + 17: "Extinct", + 18: "Not Threatened", + 19: "Possibly Threatened", + 20: "Threatened", + }, + "count": {16: 398, 17: 65, 18: 1294, 19: 408, 20: 1275}, + } +).rename_axis("rownames") + +sea_ice_df = DataFrame( + { + "year": {1012: 2016, 1013: 2017, 1014: 2018, 1015: 2019, 1016: 2020}, + "mo": {1012: 12, 1013: 12, 1014: 12, 1015: 12, 1016: 12}, + "data.type": { + 1012: "Goddard", + 1013: "Goddard", + 1014: "Goddard", + 1015: "Goddard", + 1016: "NRTSI-G", + }, + "region": {1012: "S", 1013: "S", 1014: "S", 1015: "S", 1016: "S"}, + "extent": {1012: 8.28, 1013: 9.48, 1014: 9.19, 1015: 9.41, 1016: 10.44}, + "area": {1012: 5.51, 1013: 6.23, 1014: 5.59, 1015: 6.59, 1016: 6.5}, + } +).rename_axis("rownames") + +ppm_df = DataFrame( + { + "date": { + 754: Timestamp("2020-12-16 23:42:25.920000256"), + 755: Timestamp("2021-01-16 11:17:31.199999744"), + 756: Timestamp("2021-02-15 21:00:00"), + 757: Timestamp("2021-03-18 06:42:28.800000256"), + 758: Timestamp("2021-04-17 17:17:31.199999744"), + }, + "decimal_date": { + 754: 2020.9583, + 755: 2021.0417, + 756: 2021.125, + 757: 2021.2083, + 758: 2021.2917, + }, + "monthly_average": { + 754: 414.25, + 755: 415.52, + 756: 416.75, + 757: 417.64, + 758: 419.05, + }, + "deseasonalized": { + 754: 414.98, + 755: 415.26, + 756: 415.93, + 757: 416.18, + 758: 416.23, + }, + "num_days": {754: 30, 755: 29, 756: 28, 757: 
28, 758: 24}, + "std_dev_of_days": {754: 0.47, 755: 0.44, 756: 1.02, 757: 0.86, 758: 1.12}, + "unc_of_mon_mean": {754: 0.17, 755: 0.16, 756: 0.37, 757: 0.31, 758: 0.44}, + } +).rename_axis("rownames") + + +@pytest.fixture(params=["rda", "rds"]) +def rtype(request): + return request.param + + +@pytest.fixture(params=[None, "gzip", "bz2", "xz"]) +def comp(request): + return request.param + + +# RDATA READER + + +# PATH_OR_BUFFER + + +def test_read_rds_file(datapath): + filename = datapath("io", "data", "rdata", "ghg_df.rds") + r_dfs = read_rdata(filename) + + tm.assert_frame_equal(ghg_df, r_dfs["r_dataframe"].tail()) + + +def test_read_rda_file(datapath): + filename = datapath("io", "data", "rdata", "env_data_dfs.rda") + r_dfs = read_rdata(filename) + + assert list(r_dfs.keys()) == ["ghg_df", "plants_df", "sea_ice_df"] + + tm.assert_frame_equal(ghg_df, r_dfs["ghg_df"].tail()) + tm.assert_frame_equal(plants_df, r_dfs["plants_df"].tail()) + tm.assert_frame_equal(sea_ice_df, r_dfs["sea_ice_df"].tail()) + + +def test_read_rds_filelike(datapath): + filename = datapath("io", "data", "rdata", "sea_ice_df.rds") + + with open(filename, "rb") as f: + r_dfs = read_rdata(f, file_format="rds") + + tm.assert_frame_equal(sea_ice_df, r_dfs["r_dataframe"].tail()) + + +def test_read_rda_filelike(datapath): + filename = datapath("io", "data", "rdata", "env_data_dfs.rda") + + with open(filename, "rb") as f: + r_dfs = read_rdata(f, file_format="rda") + + assert list(r_dfs.keys()) == ["ghg_df", "plants_df", "sea_ice_df"] + + tm.assert_frame_equal(ghg_df, r_dfs["ghg_df"].tail()) + tm.assert_frame_equal(plants_df, r_dfs["plants_df"].tail()) + tm.assert_frame_equal(sea_ice_df, r_dfs["sea_ice_df"].tail()) + + +def test_bytesio_rds(datapath): + filename = datapath("io", "data", "rdata", "sea_ice_df.rds") + + with open(filename, "rb") as f: + with BytesIO(f.read()) as b_io: + r_dfs = read_rdata(b_io, file_format="rds") + + tm.assert_frame_equal(sea_ice_df, r_dfs["r_dataframe"].tail()) + + +def 
test_bytesio_rda(datapath): + filename = datapath("io", "data", "rdata", "env_data_dfs.rda") + + with open(filename, "rb") as f: + with BytesIO(f.read()) as b_io: + r_dfs = read_rdata(b_io, file_format="rda") + + assert list(r_dfs.keys()) == ["ghg_df", "plants_df", "sea_ice_df"] + + tm.assert_frame_equal(ghg_df, r_dfs["ghg_df"].tail()) + tm.assert_frame_equal(plants_df, r_dfs["plants_df"].tail()) + tm.assert_frame_equal(sea_ice_df, r_dfs["sea_ice_df"].tail()) + + +# FILE FORMAT + + +def test_read_wrong_format(datapath): + with pytest.raises(ValueError, match="not a valid value for file_format"): + filename = datapath("io", "data", "rdata", "plants_df.rds") + read_rdata(filename, file_format="r") + + +def test_read_wrong_file(): + with pytest.raises(FileNotFoundError, match="file cannot be found"): + filename = os.path.join("data", "rdata", "plants_df.rda") + read_rdata(filename) + + +def test_read_rds_non_df(datapath): + with pytest.raises( + LibrdataReaderError, + match="Invalid file, or file has unsupported features", + ): + filename = datapath("io", "data", "rdata", "ppm_ts.rds") + read_rdata(filename) + + +def test_read_rda_non_dfs(datapath): + with pytest.raises( + LibrdataReaderError, + match="Invalid file, or file has unsupported features", + ): + filename = datapath("io", "data", "rdata", "env_data_non_dfs.rda") + read_rdata(filename) + + +def test_read_not_rda_file(datapath): + with pytest.raises( + LibrdataReaderError, match="The file contains an unrecognized object" + ): + filename = datapath("io", "data", "rdata", "ppm_df.csv") + read_rdata(filename, file_format="rda", compression=None) + + +def test_bytes_read_infer_rds(datapath): + filename = datapath("io", "data", "rdata", "sea_ice_df.rds") + + with pytest.raises(ValueError, match="Unable to infer file format from file name"): + with open(filename, "rb") as f: + read_rdata(f) + + +def test_bytes_read_infer_rda(datapath): + filename = datapath("io", "data", "rdata", "env_data_dfs.rda") + + with 
pytest.raises(ValueError, match="Unable to infer file format from file name"): + with open(filename, "rb") as f: + read_rdata(f) + + +# URL + + +@tm.network +def test_read_rda_url(): + url_df = DataFrame( + { + "carrier": {1: "9E", 2: "AA", 3: "AS", 4: "B6", 5: "DL"}, + "name": { + 1: "Endeavor Air Inc.", + 2: "American Airlines Inc.", + 3: "Alaska Airlines Inc.", + 4: "JetBlue Airways", + 5: "Delta Air Lines Inc.", + }, + } + ).rename_axis("rownames") + + url = ( + "https://github.com/hadley/nycflights13/blob/master/data/airlines.rda?raw=true" + ) + r_dfs = read_rdata(url, file_format="rda") + + tm.assert_frame_equal(url_df, r_dfs["airlines"].head()) + + +@tm.network +def test_read_unable_infer_format(): + with pytest.raises(ValueError, match="Unable to infer file format from file name"): + url = ( + "https://github.com/hadley/nycflights13/" + "blob/master/data/airlines.rda?raw=true" + ) + read_rdata(url) + + +@tm.network +def test_read_wrong_url(): + with pytest.raises(HTTPError, match="HTTP Error 404: Not Found"): + url = "https://example.com/data.rdata" + read_rdata(url) + + +# S3 + + +@pytest.mark.slow +@tm.network +@td.skip_if_no("s3fs") +def test_read_rda_s3(): + # Public Data of CRAN Packages on GitHub + rda_s3 = "s3://public-r-data/ghcran.Rdata" + r_df = read_rdata(rda_s3, compression=None, rownames=False) + + # below needed to pass codespell on keyword + r_df["ghcran"].columns.values[107] = "Repository" + + # test structure and not static data since data changes daily + expected_cols = [ + "Package", + "Type", + "Title", + "Version", + "Date", + "Author", + "Maintainer", + "Description", + "License", + "Depends", + "Suggests", + "NeedsCompilation", + "Packaged", + "Repository", + "Date/Publication", + "Contact", + "Imports", + "VignetteBuilder", + "Encoding", + "SystemRequirements", + "RoxygenNote", + "LazyLoad", + "URL", + "Authors@R", + "Classification/ACM", + "Classification/JEL", + "LinkingTo", + "BugReports", + "LazyData", + "Keywords", + 
"Repository/R-Forge/Project", + "Repository/R-Forge/Revision", + "Repository/R-Forge/DateTimeStamp", + "biocViews", + "Collate", + "Copyright", + "ByteCompile", + "ZipData", + "BuildVignettes", + "Additional_repositories", + "Acknowledgements", + "MailingList", + "Enhances", + "Classification/MSC", + "OS_type", + "BuildManual", + "BuildResaveData", + "References", + "Note", + "X-CRAN-Original-Maintainer", + "RcppModules", + "Data", + "BioViews", + "lazy-loading", + "URLNote", + "Reference", + "KeepSource", + "LazyDataCompression", + "Language", + "Requires", + "Dependencies", + "X-CRAN-Comment", + "Citation", + "Biarch", + "Published", + "RequiredLauncherGeneration", + "SuggestsNote", + "Priority", + "Acknowledgments", + "Revision", + "License_is_FOSS", + "License_restricts_use", + "Archs", + "LazyDataNote", + "Affiliations", + "LicenseDetails", + "SCM", + "Classification/ACM-2012", + "X-CRAN-Original-Package", + "Dialect", + "Limitations", + "Check", + "Recommends", + "LastChangedDate", + "LastChangedRevision", + "SVNRevision", + "X-CRAN-Original-OS_type", + "RcmdrModels", + "Log-Exceptions", + "Models", + "DateNote", + "SystemRequirementsNote", + "Url", + "Reverse depends", + "Lazyload", + "DependsNote", + "VersionSplus", + "MaintainerSplus", + "VersionNote", + "Disclaimer", + "LicenseNote", + "Namespace", + "Address", + "Keyword", + "Contributors", + "NOTE", + "Acknowledgement", + "Repository", + "Lazydata", + "RdMacros", + "HowToCite", + "Publication", + "Reference Manual", + "Special Acknowledgement", + "SysDataCompression", + "DisplayMode", + "Nickname", + "BuildKeepEmpty", + "Twitter", + "Remotes", + "SystemRequirement", + "Github", + ] + + assert isinstance(r_df, dict) + assert isinstance(r_df["ghcran"], DataFrame) + assert r_df["ghcran"].columns.tolist() == expected_cols + + +# TYPE + + +def test_read_rds_df_output(datapath): + filename = datapath("io", "data", "rdata", "sea_ice_df.rds") + r_dfs = read_rdata(filename) + + assert isinstance(r_dfs, dict) + 
assert list(r_dfs.keys()) == ["r_dataframe"] + + +def test_read_rda_dict_output(datapath): + filename = datapath("io", "data", "rdata", "env_data_dfs.rda") + r_dfs = read_rdata(filename) + + assert isinstance(r_dfs, dict) + assert list(r_dfs.keys()) == ["ghg_df", "plants_df", "sea_ice_df"] + + +# SELECT_FRAMES + + +def test_read_select_frames_rda_dfs(datapath): + filename = datapath("io", "data", "rdata", "env_data_dfs.rda") + r_dfs = read_rdata(filename, select_frames=["ghg_df", "sea_ice_df"]) + + assert "plants_df" not in list(r_dfs.keys()) + assert "ghg_df" in list(r_dfs.keys()) + assert "sea_ice_df" in list(r_dfs.keys()) + + +def test_read_wrong_select_frames(datapath): + with pytest.raises(TypeError, match="not a valid type for select_frames"): + filename = datapath("io", "data", "rdata", "env_data_dfs.rda") + read_rdata(filename, select_frames="plants_df") + + +# ROWNAMES + + +def test_read_rownames_true_rds(datapath): + filename = datapath("io", "data", "rdata", "sea_ice_df.rds") + r_df = read_rdata(filename, rownames=True)["r_dataframe"] + + if isinstance(r_df, DataFrame): + assert r_df.index.name == "rownames" + + +def test_read_rownames_false_rds(datapath): + filename = datapath("io", "data", "rdata", "sea_ice_df.rds") + r_df = read_rdata(filename, rownames=False)["r_dataframe"] + + if isinstance(r_df, DataFrame): + assert r_df.index.name != "rownames" + + +def test_read_rownames_true_rda(datapath): + filename = datapath("io", "data", "rdata", "env_data_dfs.rda") + r_dfs = read_rdata(filename, rownames=True) + + assert r_dfs["ghg_df"].index.name == "rownames" + assert r_dfs["plants_df"].index.name == "rownames" + assert r_dfs["sea_ice_df"].index.name == "rownames" + + +def test_read_rownames_false_rda(datapath): + filename = datapath("io", "data", "rdata", "env_data_dfs.rda") + r_dfs = read_rdata(filename, rownames=False) + + assert r_dfs["ghg_df"].index.name != "rownames" + assert r_dfs["plants_df"].index.name != "rownames" + assert 
r_dfs["sea_ice_df"].index.name != "rownames" + + +# ENCODING + + +def test_non_utf8_data(datapath, rtype): + filename = datapath("io", "data", "rdata", f"climate_non_utf8_df.{rtype}") + with pytest.raises(SystemError, match=("returned a result with an error set")): + read_rdata(filename) + + +# DATE / TIME + + +def test_utc_datetime_convert(datapath): + filename = datapath("io", "data", "rdata", "ppm_df.rda") + r_dfs = read_rdata(filename) + + assert str(r_dfs["ppm_df"]["date"].dtype) == "datetime64[ns]" + + tm.assert_frame_equal(ppm_df, r_dfs["ppm_df"].tail()) + + +def test_read_outbound_dates(datapath, rtype): + filename = datapath("io", "data", "rdata", f"planetary_boundaries_df.{rtype}") + with pytest.raises( + OutOfBoundsDatetime, match=("cannot convert input with unit 's'") + ): + read_rdata(filename) + + +# RDATA WRITER + +# PATH_OR_BUFFER + + +def test_write_read_file(rtype): + with tm.ensure_clean("test.out") as path: + ghg_df.to_rdata(path, file_format=rtype, index=False) + r_dfs = read_rdata(path, file_format=rtype, rownames=False) + + expected = ghg_df.reset_index(drop=True) + output = r_dfs["pandas_dataframe"] if rtype == "rda" else r_dfs["r_dataframe"] + + tm.assert_frame_equal(output, expected) + + +def test_write_read_pathlib(rtype): + from pathlib import Path + + with tm.ensure_clean_dir() as tmp_dir: + tmp_file = Path(tmp_dir).joinpath("test.out") + sea_ice_df.to_rdata(tmp_file, file_format=rtype, index=False) + r_dfs = read_rdata(tmp_file, file_format=rtype, rownames=False) + + expected = sea_ice_df.reset_index(drop=True) + output = r_dfs["pandas_dataframe"] if rtype == "rda" else r_dfs["r_dataframe"] + + tm.assert_frame_equal(output, expected) + + +def test_write_read_filelike(rtype): + with BytesIO() as b_io: + sea_ice_df.to_rdata(b_io, file_format=rtype, compression=None, index=False) + r_dfs = read_rdata( + b_io.getvalue(), + file_format=rtype, + rownames=False, + compression=None, + ) + + expected = sea_ice_df.reset_index(drop=True) + output 
= r_dfs["pandas_dataframe"] if rtype == "rda" else r_dfs["r_dataframe"] + + tm.assert_frame_equal(output, expected) + + +# FILE FORMAT + + +def test_write_wrong_format(): + with tm.ensure_clean("test.rda") as path: + with pytest.raises(ValueError, match=("not a valid value for file_format")): + ghg_df.to_rdata(path, file_format="csv") + + +def test_write_unable_to_infer(): + with tm.ensure_clean("test") as path: + with pytest.raises( + ValueError, match=("Unable to infer file format from file name") + ): + ghg_df.to_rdata(path) + + +# INDEX + + +def test_write_index_true(rtype): + with tm.ensure_clean("test.out") as path: + plants_df.rename_axis(None).to_rdata(path, file_format=rtype, index=True) + r_dfs = read_rdata(path, file_format=rtype) + + r_df = r_dfs if rtype == "rds" else r_dfs["pandas_dataframe"] + + if isinstance(r_df, DataFrame): + assert "index" in r_df.columns + + +def test_write_index_false(rtype): + with tm.ensure_clean("test.out") as path: + plants_df.rename_axis(None).to_rdata(path, file_format=rtype, index=False) + r_dfs = read_rdata(path, file_format=rtype) + + r_df = r_dfs if rtype == "rds" else r_dfs["pandas_dataframe"] + + if isinstance(r_df, DataFrame): + assert "index" not in r_df.columns + + +# COMPRESSION + + +def test_write_all_compression(rtype, comp): + with tm.ensure_clean("test.out") as path: + ghg_df.to_rdata(path, file_format=rtype, compression=comp, index=False) + r_dfs = read_rdata(path, file_format=rtype, compression=comp, rownames=False) + + expected = ghg_df.reset_index(drop=True) + output = r_dfs["pandas_dataframe"] if rtype == "rda" else r_dfs["r_dataframe"] + + tm.assert_frame_equal(output, expected) + + +def test_write_zip_compression(rtype): + with tm.ensure_clean("test.out") as path: + with pytest.raises(ValueError, match=("not a supported value for compression")): + ghg_df.to_rdata(path, file_format=rtype, compression="zip") + + +@pytest.mark.skipif( + not PY38, + reason=("gzip.BadGzipFile exception added in 3.8"), +) 
+def test_write_read_mismatched_compression(rtype): + with tm.ensure_clean("test.out") as path: + with pytest.raises(gzip.BadGzipFile, match=("Not a gzipped file")): + ghg_df.to_rdata(path, file_format=rtype, compression=None) + read_rdata(path, file_format=rtype) + + +# RDA_NAMES + + +def test_write_new_rda_name(): + with tm.ensure_clean("test.rda") as path: + ghg_df.to_rdata(path, rda_name="py_df") + r_dfs = read_rdata(path) + + assert "py_df" in list(r_dfs.keys()) + + +# PROBLEM DATA + + +def test_write_nested_list(rtype, comp): + plants_df["plants_dict"] = plants_df["plant_group"].apply( + lambda x: plants_df["plant_group"].unique() + ) + with tm.ensure_clean("test") as path: + with pytest.raises( + LibrdataWriterError, + match=("DataFrame contains one more invalid types or data values"), + ): + plants_df.to_rdata(path, file_format=rtype, compression=comp) + + +# DATE / TIME + + +def test_write_read_utc_dateteime(): + with tm.ensure_clean("test.rda") as path: + ppm_df.to_rdata(path, index=False) + r_dfs = read_rdata(path, rownames=False) + + ppm_df["date"] = ppm_df["date"].dt.floor("S") + + tm.assert_frame_equal(ppm_df.reset_index(drop=True), r_dfs["pandas_dataframe"]) + + +# DTYPES + + +@pytest.mark.skipif( + not IS64, + reason=("large dtypes not supported in 32-bit"), +) +def test_write_read_dtypes(rtype, comp): + rda_name = "pandas_dataframe" if rtype == "rda" else "r_dataframe" + + dts = [ + Timestamp.min.ceil("S"), + Timestamp(-(10 ** 18)), + Timestamp(0), + Timestamp(10 ** 18), + Timestamp.now().floor("S"), + Timestamp.max.floor("S"), + ] + + arr = np.random.randn(6) + arr[2:-2] = np.nan + + dtypes_df = DataFrame( + { + "categ": Categorical( + ["ocean", "climate", "biosphere", "land", "freshwater", "atmosphere"] + ), + "interval": interval_range(start=10, periods=6, freq=10 * 2), + "bool": [False, True, True, True, False, False], + "int": [2 ** 31 - 1, 1, -(2 ** 31) + 1, -1, 0, 10 ** 9], + "float": [0, np.pi, float("nan"), np.e, np.euler_gamma, 0], + 
"string": array( + ["acidification", "change", "loss", "use", "depletion", "aersols"], + dtype="string", + ), + "sparse": SparseArray(arr), + "period": period_range( + start="2021-01-01 00:00:00", end="2021-06-01 00:00:00", freq="M" + ), + "datetime": to_datetime(dts), + "datetime_tz": to_datetime(dts).tz_localize("utc"), + "timedelta": [(dt - Timestamp(0)) for dt in dts], + } + ) + + with tm.ensure_clean("test") as path: + dtypes_df.to_rdata(path, file_format=rtype, index=False, compression=comp) + r_df = read_rdata(path, file_format=rtype, rownames=False, compression=comp)[ + rda_name + ] + + # convert non-primitive and non-datetimes to objects not supported in R + excl_types = ["bool", "number", "object", "datetime", "datetimetz", "timedelta"] + for col in dtypes_df.select_dtypes(exclude=excl_types).columns: + dtypes_df[col] = dtypes_df[col].astype(str) + + # convert special types + dtypes_df["sparse"] = np.array(dtypes_df["sparse"].values, dtype="float64") + dtypes_df["datetime_tz"] = dtypes_df["datetime_tz"].dt.tz_localize(None) + dtypes_df["timedelta"] = dtypes_df["timedelta"].dt.total_seconds() + + tm.assert_frame_equal(dtypes_df, r_df) + + +# CYTHON CLASSES + + +def test_reader_unpickled(datapath, rtype): + if rtype == "rda": + filename = datapath("io", "data", "rdata", "env_data_dfs.rda") + rda_name = "sea_ice_df" + elif rtype == "rds": + filename = datapath("io", "data", "rdata", "plants_df.rds") + rda_name = "r_dataframe" + + lbr1 = LibrdataReader() + + with tm.ensure_clean("test.pkl") as pklpath: + with open(pklpath, "wb") as f_w: + pickle.dump(lbr1, f_w) + + with open(pklpath, "rb") as f_r: + lbr2 = pickle.load(f_r) + + with tm.ensure_clean("test") as r_temp: + # need to decompress to temp file + with gzip.open(filename, "rb") as f_r: + with open(r_temp, "wb") as f_w: + shutil.copyfileobj(f_r, f_w) + + df_output = read_rdata( + r_temp, file_format=rtype, compression=None, rownames=False + )[rda_name].to_dict() + + cy_output = lbr2.read_rdata(r_temp) + 
+ lbr_output = { + vcol: vdata + for (kdata, vdata), (kcol, vcol) in zip( + cy_output[rda_name]["data"].items(), cy_output[rda_name]["colnames"].items() + ) + } + + assert lbr_output == df_output + + +def test_writer_unpickled(datapath, rtype): + rda_name = "test_frame" if rtype == "rda" else "r_dataframe" + + lbw1 = LibrdataWriter() + + with tm.ensure_clean("test.pkl") as pklpath: + with open(pklpath, "wb") as f_w: + pickle.dump(lbw1, f_w) + + with open(pklpath, "rb") as f_r: + lbw2 = pickle.load(f_r) + + rdict = {"dtypes": {k: str(v) for k, v in ghg_df.dtypes.to_dict().items()}} + for k, v in rdict["dtypes"].items(): + if any(x in v for x in ("bool", "Boolean")): + rdict["dtypes"][k] = "bool" + + elif any(x in v for x in ("int", "uint", "Int", "UInt")): + rdict["dtypes"][k] = "int" + + elif any(x in v for x in ("float", "Float")): + rdict["dtypes"][k] = "float" + + elif any(x in v for x in ("datetime", "Datetime")): + rdict["dtypes"][k] = "datetime" + + elif any(x in v for x in ("object", "string", "String")): + rdict["dtypes"][k] = "object" + + rdict["data"] = ghg_df.reset_index(drop=True).to_dict() + + expected = ghg_df.reset_index(drop=True) + + with tm.ensure_clean("test") as r_temp: + lbw2.write_rdata( + rfile=r_temp, + rdict=rdict, + rformat=rtype, + tbl_name="test_frame", + ) + + output = read_rdata( + r_temp, + file_format=rtype, + rownames=False, + compression=None, + )[rda_name] + + tm.assert_frame_equal(output, expected) diff --git a/setup.py b/setup.py index 337719053585c..3d682610a2e46 100755 --- a/setup.py +++ b/setup.py @@ -225,6 +225,7 @@ class CheckSDist(sdist_class): "pandas/_libs/window/indexers.pyx", "pandas/_libs/writers.pyx", "pandas/io/sas/sas.pyx", + "pandas/io/rdata/_rdata.pyx", ] _cpp_pyxfiles = [ @@ -327,6 +328,11 @@ def run(self): extra_compile_args = [] extra_link_args = [] + +rdata_includes = [] +rdata_libs_dir = [] +rdata_libs = [] + if is_platform_windows(): if debugging_symbols_requested: extra_compile_args.append("/Z7") @@ -364,6 
+370,11 @@ def run(self): # https://github.com/pandas-dev/pandas/issues/35559 extra_compile_args.append("-Wno-error=unreachable-code") + # rdata requires system iconv library + rdata_includes = ["/usr/include"] + rdata_libs_dir = ["/usr/lib"] + rdata_libs = ["iconv"] + # enable coverage by building cython files by setting the environment variable # "PANDAS_CYTHON_COVERAGE" (with a Truthy value) or by running build_ext # with `--with-cython-coverage`enabled @@ -640,7 +651,37 @@ def srcpath(name=None, suffix=".pyx", subdir="src"): extensions.append(ujson_ext) # ---------------------------------------------------------------------- +# rdata + +rdata_srcs = [ + "pandas/io/rdata/_rdata.pyx", + "pandas/_libs/src/librdata/rdata_parser.c", + "pandas/_libs/src/librdata/rdata_read.c", + "pandas/_libs/src/librdata/rdata_write.c", + "pandas/_libs/src/librdata/rdata_io_unistd.c", + "pandas/_libs/src/librdata/rdata_error.c", + "pandas/_libs/src/librdata/rdata_bits.c", + "pandas/_libs/src/librdata/CKHashTable.c", +] +if is_platform_windows(): + rdata_srcs.append("pandas/_libs/src/librdata/win_iconv.c") + +rdata_ext = Extension( + name="pandas.io.rdata._rdata", + sources=rdata_srcs, + include_dirs=rdata_includes, + library_dirs=rdata_libs_dir, + libraries=rdata_libs, + language="c", + define_macros=macros, + extra_compile_args=extra_compile_args, + extra_link_args=extra_link_args, +) + +extensions.append(rdata_ext) + +# ---------------------------------------------------------------------- if __name__ == "__main__": # Freeze to support parallel compilation when using spawn instead of fork
- [X] follows up on #40884 - [X] tests added / passed - [X] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [X] whatsnew entry - [X] user_guide/io entry ### Proposed RData I/O module interfaces to the C library: [librdata](https://github.com/WizardMac/librdata). Overall, this PR includes following changes: - **setup.py**: pandas/setup.py (new rdata section at the bottom) - **librdata**: pandas/_libs/src/librdata (C and header files and iconv scripts) - **rdata IO**: pandas/io/rdata (Cython and Python scripts) - **frame.py**: pandas/core/frame.py (for `DataFrame.to_rdata`) - **tests**: pandas/tests/io/test_rdata.py - **tests data**: pandas/tests/io/data/rdata (R data files in gzip compression) - **docs**: librdata license, user_guide/io.rst, whatsnew/v1.3.0.rst Note: special handling of `iconv`, a system resource built-in to Unix machines, is required: - For Linux, to centralize from different locations, the `iconv.h` header of the GNU C library is included. - For Mac, setup.py points to system folders, `/usr/include` and `/usr/lib`, which may differ from users installs. - For Windows , since `iconv` is not built-in, two counterpart files (.h and .c script) were added from this repo, [win-iconv](https://github.com/win-iconv/win-iconv), where its readme indicates code is placed in the public domain.
https://api.github.com/repos/pandas-dev/pandas/pulls/41386
2021-05-08T18:46:09Z
2021-08-18T00:23:37Z
null
2021-08-18T00:23:57Z
TYP: SelectionMixin
diff --git a/pandas/_typing.py b/pandas/_typing.py index 1e1fffdd60676..7763b0ceb610a 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -56,6 +56,7 @@ from pandas.core.generic import NDFrame from pandas.core.groupby.generic import ( DataFrameGroupBy, + GroupBy, SeriesGroupBy, ) from pandas.core.indexes.base import Index @@ -158,6 +159,7 @@ AggObjType = Union[ "Series", "DataFrame", + "GroupBy", "SeriesGroupBy", "DataFrameGroupBy", "BaseWindow", diff --git a/pandas/core/apply.py b/pandas/core/apply.py index ad25eb6fbcaa8..9d3437fe08b24 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -24,6 +24,7 @@ AggFuncTypeDict, AggObjType, Axis, + FrameOrSeries, FrameOrSeriesUnion, ) from pandas.util._decorators import cache_readonly @@ -60,10 +61,7 @@ Index, Series, ) - from pandas.core.groupby import ( - DataFrameGroupBy, - SeriesGroupBy, - ) + from pandas.core.groupby import GroupBy from pandas.core.resample import Resampler from pandas.core.window.rolling import BaseWindow @@ -1089,11 +1087,9 @@ def apply_standard(self) -> FrameOrSeriesUnion: class GroupByApply(Apply): - obj: SeriesGroupBy | DataFrameGroupBy - def __init__( self, - obj: SeriesGroupBy | DataFrameGroupBy, + obj: GroupBy[FrameOrSeries], func: AggFuncType, args, kwargs, diff --git a/pandas/core/base.py b/pandas/core/base.py index 3270e3dd82f7d..542fd54ce0ac7 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -8,6 +8,8 @@ from typing import ( TYPE_CHECKING, Any, + Generic, + Hashable, TypeVar, cast, ) @@ -19,6 +21,7 @@ ArrayLike, Dtype, DtypeObj, + FrameOrSeries, IndexLabel, Shape, final, @@ -163,13 +166,15 @@ class SpecificationError(Exception): pass -class SelectionMixin: +class SelectionMixin(Generic[FrameOrSeries]): """ mixin implementing the selection & aggregation interface on a group-like object sub-classes need to define: obj, exclusions """ + obj: FrameOrSeries _selection: IndexLabel | None = None + exclusions: frozenset[Hashable] _internal_names = ["_cache", 
"__setstate__"] _internal_names_set = set(_internal_names) @@ -194,15 +199,10 @@ def _selection_list(self): @cache_readonly def _selected_obj(self): - # error: "SelectionMixin" has no attribute "obj" - if self._selection is None or isinstance( - self.obj, ABCSeries # type: ignore[attr-defined] - ): - # error: "SelectionMixin" has no attribute "obj" - return self.obj # type: ignore[attr-defined] + if self._selection is None or isinstance(self.obj, ABCSeries): + return self.obj else: - # error: "SelectionMixin" has no attribute "obj" - return self.obj[self._selection] # type: ignore[attr-defined] + return self.obj[self._selection] @cache_readonly def ndim(self) -> int: @@ -211,49 +211,31 @@ def ndim(self) -> int: @final @cache_readonly def _obj_with_exclusions(self): - # error: "SelectionMixin" has no attribute "obj" - if self._selection is not None and isinstance( - self.obj, ABCDataFrame # type: ignore[attr-defined] - ): - # error: "SelectionMixin" has no attribute "obj" - return self.obj.reindex( # type: ignore[attr-defined] - columns=self._selection_list - ) + if self._selection is not None and isinstance(self.obj, ABCDataFrame): + return self.obj.reindex(columns=self._selection_list) - # error: "SelectionMixin" has no attribute "exclusions" - if len(self.exclusions) > 0: # type: ignore[attr-defined] - # error: "SelectionMixin" has no attribute "obj" - # error: "SelectionMixin" has no attribute "exclusions" - return self.obj.drop(self.exclusions, axis=1) # type: ignore[attr-defined] + if len(self.exclusions) > 0: + return self.obj.drop(self.exclusions, axis=1) else: - # error: "SelectionMixin" has no attribute "obj" - return self.obj # type: ignore[attr-defined] + return self.obj def __getitem__(self, key): if self._selection is not None: raise IndexError(f"Column(s) {self._selection} already selected") if isinstance(key, (list, tuple, ABCSeries, ABCIndex, np.ndarray)): - # error: "SelectionMixin" has no attribute "obj" - if len( - 
self.obj.columns.intersection(key) # type: ignore[attr-defined] - ) != len(key): - # error: "SelectionMixin" has no attribute "obj" - bad_keys = list( - set(key).difference(self.obj.columns) # type: ignore[attr-defined] - ) + if len(self.obj.columns.intersection(key)) != len(key): + bad_keys = list(set(key).difference(self.obj.columns)) raise KeyError(f"Columns not found: {str(bad_keys)[1:-1]}") return self._gotitem(list(key), ndim=2) elif not getattr(self, "as_index", False): - # error: "SelectionMixin" has no attribute "obj" - if key not in self.obj.columns: # type: ignore[attr-defined] + if key not in self.obj.columns: raise KeyError(f"Column not found: {key}") return self._gotitem(key, ndim=2) else: - # error: "SelectionMixin" has no attribute "obj" - if key not in self.obj: # type: ignore[attr-defined] + if key not in self.obj: raise KeyError(f"Column not found: {key}") return self._gotitem(key, ndim=1) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 1105c1bd1d782..0a176dafb34bb 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -20,7 +20,6 @@ class providing the base-class of operations. 
from typing import ( TYPE_CHECKING, Callable, - Generic, Hashable, Iterable, Iterator, @@ -567,7 +566,7 @@ def group_selection_context(groupby: GroupBy) -> Iterator[GroupBy]: ] -class BaseGroupBy(PandasObject, SelectionMixin, Generic[FrameOrSeries]): +class BaseGroupBy(PandasObject, SelectionMixin[FrameOrSeries]): _group_selection: IndexLabel | None = None _apply_allowlist: frozenset[str] = frozenset() _hidden_attrs = PandasObject._hidden_attrs | { @@ -588,7 +587,6 @@ class BaseGroupBy(PandasObject, SelectionMixin, Generic[FrameOrSeries]): axis: int grouper: ops.BaseGrouper - obj: FrameOrSeries group_keys: bool @final @@ -840,7 +838,6 @@ class GroupBy(BaseGroupBy[FrameOrSeries]): more """ - obj: FrameOrSeries grouper: ops.BaseGrouper as_index: bool @@ -852,7 +849,7 @@ def __init__( axis: int = 0, level: IndexLabel | None = None, grouper: ops.BaseGrouper | None = None, - exclusions: set[Hashable] | None = None, + exclusions: frozenset[Hashable] | None = None, selection: IndexLabel | None = None, as_index: bool = True, sort: bool = True, @@ -901,7 +898,7 @@ def __init__( self.obj = obj self.axis = obj._get_axis_number(axis) self.grouper = grouper - self.exclusions = exclusions or set() + self.exclusions = frozenset(exclusions) if exclusions else frozenset() def __getattr__(self, attr: str): if attr in self._internal_names_set: diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index f1762a2535ff7..4650dbea27de1 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -652,7 +652,7 @@ def get_grouper( mutated: bool = False, validate: bool = True, dropna: bool = True, -) -> tuple[ops.BaseGrouper, set[Hashable], FrameOrSeries]: +) -> tuple[ops.BaseGrouper, frozenset[Hashable], FrameOrSeries]: """ Create and return a BaseGrouper, which is an internal mapping of how to create the grouper indexers. 
@@ -728,13 +728,13 @@ def get_grouper( if isinstance(key, Grouper): binner, grouper, obj = key._get_grouper(obj, validate=False) if key.key is None: - return grouper, set(), obj + return grouper, frozenset(), obj else: - return grouper, {key.key}, obj + return grouper, frozenset({key.key}), obj # already have a BaseGrouper, just return it elif isinstance(key, ops.BaseGrouper): - return key, set(), obj + return key, frozenset(), obj if not isinstance(key, list): keys = [key] @@ -861,7 +861,7 @@ def is_in_obj(gpr) -> bool: grouper = ops.BaseGrouper( group_axis, groupings, sort=sort, mutated=mutated, dropna=dropna ) - return grouper, exclusions, obj + return grouper, frozenset(exclusions), obj def _is_label_like(val) -> bool: diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index b51875134c614..490cdca8519e6 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -13,6 +13,7 @@ TYPE_CHECKING, Any, Callable, + Hashable, ) import warnings @@ -109,7 +110,7 @@ class BaseWindow(SelectionMixin): """Provides utilities for performing windowing operations.""" _attributes: list[str] = [] - exclusions: set[str] = set() + exclusions: frozenset[Hashable] = frozenset() def __init__( self,
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry @simonjayhawkins thoughts on the FrameOrSeries vs FrameOrSeriesUnion that ive ignored here?
https://api.github.com/repos/pandas-dev/pandas/pulls/41384
2021-05-08T18:37:08Z
2021-05-10T23:28:48Z
2021-05-10T23:28:48Z
2021-05-11T00:13:48Z
DOC: Remove deprecated example for astype
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d225ac6e6881b..c61fe49663920 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5686,6 +5686,14 @@ def astype( to_numeric : Convert argument to a numeric type. numpy.ndarray.astype : Cast a numpy array to a specified type. + Notes + ----- + .. deprecated:: 1.3.0 + + Using ``astype`` to convert from timezone-naive dtype to + timezone-aware dtype is deprecated and will raise in a + future version. Use :meth:`Series.dt.tz_localize` instead. + Examples -------- Create a DataFrame: @@ -5761,15 +5769,6 @@ def astype( 1 2020-01-02 2 2020-01-03 dtype: datetime64[ns] - - Datetimes are localized to UTC first before - converting to the specified timezone: - - >>> ser_date.astype('datetime64[ns, US/Eastern]') - 0 2019-12-31 19:00:00-05:00 - 1 2020-01-01 19:00:00-05:00 - 2 2020-01-02 19:00:00-05:00 - dtype: datetime64[ns, US/Eastern] """ if is_dict_like(dtype): if self.ndim == 1: # i.e. Series
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them Followup to #39258
https://api.github.com/repos/pandas-dev/pandas/pulls/41381
2021-05-08T15:23:40Z
2021-05-10T23:29:35Z
2021-05-10T23:29:35Z
2021-05-11T02:00:13Z
TST/CLN: use dtype fixture in numeric tests
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 857b136b67a0c..1cef932f7bf0a 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -39,7 +39,7 @@ class Base: _index_cls: Type[Index] @pytest.fixture - def simple_index(self) -> Index: + def simple_index(self): raise NotImplementedError("Method not implemented") def create_index(self) -> Index: @@ -772,6 +772,12 @@ class NumericBase(Base): Base class for numeric index (incl. RangeIndex) sub-class tests. """ + def test_constructor_unwraps_index(self, dtype): + idx = Index([1, 2], dtype=dtype) + result = self._index_cls(idx) + expected = np.array([1, 2], dtype=dtype) + tm.assert_numpy_array_equal(result._data, expected) + def test_where(self): # Tested in numeric.test_indexing pass diff --git a/pandas/tests/indexes/numeric/test_numeric.py b/pandas/tests/indexes/numeric/test_numeric.py index e63aeba54fccd..4c2c38df601ce 100644 --- a/pandas/tests/indexes/numeric/test_numeric.py +++ b/pandas/tests/indexes/numeric/test_numeric.py @@ -17,7 +17,10 @@ class TestFloat64Index(NumericBase): _index_cls = Float64Index - _dtype = np.float64 + + @pytest.fixture + def dtype(self, request): + return np.float64 @pytest.fixture( params=["int64", "uint64", "category", "datetime64"], @@ -26,8 +29,8 @@ def invalid_dtype(self, request): return request.param @pytest.fixture - def simple_index(self) -> Index: - values = np.arange(5, dtype=self._dtype) + def simple_index(self, dtype): + values = np.arange(5, dtype=dtype) return self._index_cls(values) @pytest.fixture( @@ -65,9 +68,8 @@ def check_coerce(self, a, b, is_float_index=True): else: self.check_is_index(b) - def test_constructor(self): + def test_constructor(self, dtype): index_cls = self._index_cls - dtype = self._dtype # explicit construction index = index_cls([1, 2, 3, 4, 5]) @@ -204,8 +206,7 @@ def test_equals_numeric_other_index_type(self, other): pd.timedelta_range("1 Day", periods=3), ], ) - def 
test_lookups_datetimelike_values(self, vals): - dtype = self._dtype + def test_lookups_datetimelike_values(self, vals, dtype): # If we have datetime64 or timedelta64 values, make sure they are # wrappped correctly GH#31163 @@ -277,14 +278,14 @@ def test_fillna_float64(self): class NumericInt(NumericBase): - def test_view(self): + def test_view(self, dtype): index_cls = self._index_cls idx = index_cls([], name="Foo") idx_view = idx.view() assert idx_view.name == "Foo" - idx_view = idx.view(self._dtype) + idx_view = idx.view(dtype) tm.assert_index_equal(idx, index_cls(idx_view, name="Foo")) idx_view = idx.view(index_cls) @@ -334,7 +335,7 @@ def test_logical_compat(self, simple_index): assert idx.all() == idx.values.all() assert idx.any() == idx.values.any() - def test_identical(self, simple_index): + def test_identical(self, simple_index, dtype): index = simple_index idx = Index(index.copy()) @@ -351,7 +352,7 @@ def test_identical(self, simple_index): assert not idx.identical(index) assert Index(same_values, name="foo", dtype=object).identical(idx) - assert not index.astype(dtype=object).identical(index.astype(dtype=self._dtype)) + assert not index.astype(dtype=object).identical(index.astype(dtype=dtype)) def test_cant_or_shouldnt_cast(self): msg = ( @@ -380,7 +381,10 @@ def test_prevent_casting(self, simple_index): class TestInt64Index(NumericInt): _index_cls = Int64Index - _dtype = np.int64 + + @pytest.fixture + def dtype(self): + return np.int64 @pytest.fixture( params=["uint64", "float64", "category", "datetime64"], @@ -389,8 +393,8 @@ def invalid_dtype(self, request): return request.param @pytest.fixture - def simple_index(self) -> Index: - return self._index_cls(range(0, 20, 2), dtype=self._dtype) + def simple_index(self, dtype): + return self._index_cls(range(0, 20, 2), dtype=dtype) @pytest.fixture( params=[range(0, 20, 2), range(19, -1, -1)], ids=["index_inc", "index_dec"] @@ -398,9 +402,8 @@ def simple_index(self) -> Index: def index(self, request): return 
self._index_cls(request.param) - def test_constructor(self): + def test_constructor(self, dtype): index_cls = self._index_cls - dtype = self._dtype # pass list, coerce fine index = index_cls([-5, 0, 1, 2]) @@ -439,9 +442,8 @@ def test_constructor(self): ]: tm.assert_index_equal(idx, expected) - def test_constructor_corner(self): + def test_constructor_corner(self, dtype): index_cls = self._index_cls - dtype = self._dtype arr = np.array([1, 2, 3, 4], dtype=object) index = index_cls(arr) @@ -465,12 +467,6 @@ def test_constructor_coercion_signed_to_unsigned(self, uint_dtype): with pytest.raises(OverflowError, match=msg): Index([-1], dtype=uint_dtype) - def test_constructor_unwraps_index(self): - idx = Index([1, 2]) - result = self._index_cls(idx) - expected = np.array([1, 2], dtype=self._dtype) - tm.assert_numpy_array_equal(result._data, expected) - def test_coerce_list(self): # coerce things arr = Index([1, 2, 3, 4]) @@ -478,13 +474,16 @@ def test_coerce_list(self): # but not if explicit dtype passed arr = Index([1, 2, 3, 4], dtype=object) - assert isinstance(arr, Index) + assert type(arr) is Index class TestUInt64Index(NumericInt): _index_cls = UInt64Index - _dtype = np.uint64 + + @pytest.fixture + def dtype(self): + return np.uint64 @pytest.fixture( params=["int64", "float64", "category", "datetime64"], @@ -493,9 +492,9 @@ def invalid_dtype(self, request): return request.param @pytest.fixture - def simple_index(self) -> Index: + def simple_index(self, dtype): # compat with shared Int64/Float64 tests - return self._index_cls(np.arange(5, dtype=self._dtype)) + return self._index_cls(np.arange(5, dtype=dtype)) @pytest.fixture( params=[ @@ -507,9 +506,8 @@ def simple_index(self) -> Index: def index(self, request): return self._index_cls(request.param) - def test_constructor(self): + def test_constructor(self, dtype): index_cls = self._index_cls - dtype = self._dtype idx = index_cls([1, 2, 3]) res = Index([1, 2, 3], dtype=dtype) diff --git 
a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py index 12fce56ffcb21..d9b093cc97fda 100644 --- a/pandas/tests/indexes/ranges/test_range.py +++ b/pandas/tests/indexes/ranges/test_range.py @@ -23,6 +23,10 @@ class TestRangeIndex(NumericBase): _index_cls = RangeIndex + @pytest.fixture + def dtype(self): + return np.int64 + @pytest.fixture( params=["uint64", "float64", "category", "datetime64"], ) @@ -43,6 +47,11 @@ def simple_index(self) -> Index: def index(self, request): return request.param + def test_constructor_unwraps_index(self, dtype): + result = self._index_cls(1, 3) + expected = np.array([1, 2], dtype=dtype) + tm.assert_numpy_array_equal(result._data, expected) + def test_can_hold_identifiers(self, simple_index): idx = simple_index key = idx[0]
small cleanup to make tests amenable to tests againt several dtypes.
https://api.github.com/repos/pandas-dev/pandas/pulls/41380
2021-05-08T08:46:25Z
2021-05-10T23:30:42Z
2021-05-10T23:30:42Z
2021-05-10T23:42:46Z
CLN: groupby assorted
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 9287163053cac..df0a413b7a76a 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -90,7 +90,6 @@ MultiIndex, all_indexes_same, ) -import pandas.core.indexes.base as ibase from pandas.core.series import Series from pandas.core.util.numba_ import maybe_use_numba @@ -482,14 +481,13 @@ def _get_index() -> Index: if isinstance(values[0], dict): # GH #823 #24880 index = _get_index() - result: FrameOrSeriesUnion = self._reindex_output( - self.obj._constructor_expanddim(values, index=index) - ) + res_df = self.obj._constructor_expanddim(values, index=index) + res_df = self._reindex_output(res_df) # if self.observed is False, # keep all-NaN rows created while re-indexing - result = result.stack(dropna=self.observed) - result.name = self._selection_name - return result + res_ser = res_df.stack(dropna=self.observed) + res_ser.name = self._selection_name + return res_ser elif isinstance(values[0], (Series, DataFrame)): return self._concat_objects(keys, values, not_indexed_same=not_indexed_same) else: @@ -1000,13 +998,18 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) # grouper specific aggregations if self.grouper.nkeys > 1: + # test_groupby_as_index_series_scalar gets here with 'not self.as_index' return self._python_agg_general(func, *args, **kwargs) elif args or kwargs: + # test_pass_args_kwargs gets here (with and without as_index) + # can't return early result = self._aggregate_frame(func, *args, **kwargs) elif self.axis == 1: # _aggregate_multiple_funcs does not allow self.axis == 1 + # Note: axis == 1 precludes 'not self.as_index', see __init__ result = self._aggregate_frame(func) + return result else: @@ -1036,7 +1039,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) if not self.as_index: self._insert_inaxis_grouper_inplace(result) - result.index = np.arange(len(result)) + result.index 
= Index(range(len(result))) return result._convert(datetime=True) @@ -1162,7 +1165,9 @@ def _wrap_applied_output(self, data, keys, values, not_indexed_same=False): if self.as_index: return self.obj._constructor_sliced(values, index=key_index) else: - result = DataFrame(values, index=key_index, columns=[self._selection]) + result = self.obj._constructor( + values, index=key_index, columns=[self._selection] + ) self._insert_inaxis_grouper_inplace(result) return result else: @@ -1611,8 +1616,8 @@ def _wrap_transformed_output( def _wrap_agged_manager(self, mgr: Manager2D) -> DataFrame: if not self.as_index: - index = np.arange(mgr.shape[1]) - mgr.set_axis(1, ibase.Index(index)) + index = Index(range(mgr.shape[1])) + mgr.set_axis(1, index) result = self.obj._constructor(mgr) self._insert_inaxis_grouper_inplace(result) @@ -1761,7 +1766,7 @@ def nunique(self, dropna: bool = True) -> DataFrame: results._get_axis(other_axis).names = obj._get_axis(other_axis).names if not self.as_index: - results.index = ibase.default_index(len(results)) + results.index = Index(range(len(results))) self._insert_inaxis_grouper_inplace(results) return results diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 46b47bc29d8a6..3045451974ee7 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -889,9 +889,8 @@ def codes_info(self) -> np.ndarray: @final def _get_compressed_codes(self) -> tuple[np.ndarray, np.ndarray]: - all_codes = self.codes - if len(all_codes) > 1: - group_index = get_group_index(all_codes, self.shape, sort=True, xnull=True) + if len(self.groupings) > 1: + group_index = get_group_index(self.codes, self.shape, sort=True, xnull=True) return compress_group_index(group_index, sort=self.sort) ping = self.groupings[0] @@ -1111,6 +1110,7 @@ def groups(self): @property def nkeys(self) -> int: + # still matches len(self.groupings), but we can hard-code return 1 def _get_grouper(self): diff --git a/pandas/tests/groupby/test_groupby.py 
b/pandas/tests/groupby/test_groupby.py index f716a3a44cd54..44d48c45e1fd1 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -234,17 +234,18 @@ def f(x, q=None, axis=0): tm.assert_series_equal(trans_result, trans_expected) # DataFrame - df_grouped = tsframe.groupby(lambda x: x.month) - agg_result = df_grouped.agg(np.percentile, 80, axis=0) - apply_result = df_grouped.apply(DataFrame.quantile, 0.8) - expected = df_grouped.quantile(0.8) - tm.assert_frame_equal(apply_result, expected, check_names=False) - tm.assert_frame_equal(agg_result, expected) - - agg_result = df_grouped.agg(f, q=80) - apply_result = df_grouped.apply(DataFrame.quantile, q=0.8) - tm.assert_frame_equal(agg_result, expected) - tm.assert_frame_equal(apply_result, expected, check_names=False) + for as_index in [True, False]: + df_grouped = tsframe.groupby(lambda x: x.month, as_index=as_index) + agg_result = df_grouped.agg(np.percentile, 80, axis=0) + apply_result = df_grouped.apply(DataFrame.quantile, 0.8) + expected = df_grouped.quantile(0.8) + tm.assert_frame_equal(apply_result, expected, check_names=False) + tm.assert_frame_equal(agg_result, expected) + + agg_result = df_grouped.agg(f, q=80) + apply_result = df_grouped.apply(DataFrame.quantile, q=0.8) + tm.assert_frame_equal(agg_result, expected) + tm.assert_frame_equal(apply_result, expected, check_names=False) def test_len():
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41379
2021-05-08T05:39:35Z
2021-05-10T23:31:58Z
2021-05-10T23:31:58Z
2021-05-10T23:59:05Z
DEPR: kind kwarg in _maybe_cast_slice_bound
diff --git a/doc/source/user_guide/scale.rst b/doc/source/user_guide/scale.rst index 7f2419bc7f19d..71aef4fdd75f6 100644 --- a/doc/source/user_guide/scale.rst +++ b/doc/source/user_guide/scale.rst @@ -345,6 +345,7 @@ we need to supply the divisions manually. Now we can do things like fast random access with ``.loc``. .. ipython:: python + :okwarning: ddf.loc["2002-01-01 12:01":"2002-01-01 12:05"].compute() diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 84f1245299d53..539881deff705 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3685,7 +3685,7 @@ def is_int(v): ) indexer = key else: - indexer = self.slice_indexer(start, stop, step, kind=kind) + indexer = self.slice_indexer(start, stop, step) return indexer @@ -5648,7 +5648,7 @@ def slice_indexer( >>> idx.slice_indexer(start='b', end=('c', 'g')) slice(1, 3, None) """ - start_slice, end_slice = self.slice_locs(start, end, step=step, kind=kind) + start_slice, end_slice = self.slice_locs(start, end, step=step) # return a slice if not is_scalar(start_slice): @@ -5678,7 +5678,7 @@ def _validate_indexer(self, form: str_t, key, kind: str_t): if key is not None and not is_integer(key): raise self._invalid_indexer(form, key) - def _maybe_cast_slice_bound(self, label, side: str_t, kind): + def _maybe_cast_slice_bound(self, label, side: str_t, kind=no_default): """ This function should be overloaded in subclasses that allow non-trivial casting on label-slice bounds, e.g. datetime-like indices allowing @@ -5698,7 +5698,8 @@ def _maybe_cast_slice_bound(self, label, side: str_t, kind): ----- Value of `side` parameter should be validated in caller. """ - assert kind in ["loc", "getitem", None] + assert kind in ["loc", "getitem", None, no_default] + self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound") # We are a plain index here (sub-class override this method if they # wish to have special treatment for floats/ints, e.g. 
Float64Index and @@ -5723,7 +5724,7 @@ def _searchsorted_monotonic(self, label, side: str_t = "left"): raise ValueError("index must be monotonic increasing or decreasing") - def get_slice_bound(self, label, side: str_t, kind) -> int: + def get_slice_bound(self, label, side: str_t, kind=None) -> int: """ Calculate slice bound that corresponds to given label. @@ -5753,7 +5754,7 @@ def get_slice_bound(self, label, side: str_t, kind) -> int: # For datetime indices label may be a string that has to be converted # to datetime boundary according to its resolution. - label = self._maybe_cast_slice_bound(label, side, kind) + label = self._maybe_cast_slice_bound(label, side) # we need to look up the label try: @@ -5843,13 +5844,13 @@ def slice_locs(self, start=None, end=None, step=None, kind=None): start_slice = None if start is not None: - start_slice = self.get_slice_bound(start, "left", kind) + start_slice = self.get_slice_bound(start, "left") if start_slice is None: start_slice = 0 end_slice = None if end is not None: - end_slice = self.get_slice_bound(end, "right", kind) + end_slice = self.get_slice_bound(end, "right") if end_slice is None: end_slice = len(self) @@ -6181,6 +6182,18 @@ def shape(self) -> Shape: # See GH#27775, GH#27384 for history/reasoning in how this is defined. return (len(self),) + def _deprecated_arg(self, value, name: str_t, methodname: str_t) -> None: + """ + Issue a FutureWarning if the arg/kwarg is not no_default. + """ + if value is not no_default: + warnings.warn( + f"'{name}' argument in {methodname} is deprecated " + "and will be removed in a future version. 
Do not pass it.", + FutureWarning, + stacklevel=3, + ) + def ensure_index_from_sequences(sequences, names=None): """ diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index f77f28deecf57..e8b21f3cec668 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -724,7 +724,7 @@ def _maybe_cast_for_get_loc(self, key) -> Timestamp: key = key.tz_convert(self.tz) return key - def _maybe_cast_slice_bound(self, label, side: str, kind): + def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default): """ If label is a string, cast it to datetime according to resolution. @@ -742,7 +742,8 @@ def _maybe_cast_slice_bound(self, label, side: str, kind): ----- Value of `side` parameter should be validated in caller. """ - assert kind in ["loc", "getitem", None] + assert kind in ["loc", "getitem", None, lib.no_default] + self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound") if isinstance(label, str): freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None)) @@ -823,12 +824,12 @@ def check_str_or_none(point): mask = np.array(True) deprecation_mask = np.array(True) if start is not None: - start_casted = self._maybe_cast_slice_bound(start, "left", kind) + start_casted = self._maybe_cast_slice_bound(start, "left") mask = start_casted <= self deprecation_mask = start_casted == self if end is not None: - end_casted = self._maybe_cast_slice_bound(end, "right", kind) + end_casted = self._maybe_cast_slice_bound(end, "right") mask = (self <= end_casted) & mask deprecation_mask = (end_casted == self) | deprecation_mask diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index d7b5f66bd385f..fc92a1b3afe53 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -825,8 +825,9 @@ def _should_fallback_to_positional(self) -> bool: # positional in this case return self.dtype.subtype.kind in ["m", "M"] - def _maybe_cast_slice_bound(self, label, side: 
str, kind): - return getattr(self, side)._maybe_cast_slice_bound(label, side, kind) + def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default): + self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound") + return getattr(self, side)._maybe_cast_slice_bound(label, side) @Appender(Index._convert_list_indexer.__doc__) def _convert_list_indexer(self, keyarr): diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index a68238af003e4..d143af4e53c60 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -2716,7 +2716,7 @@ def _get_indexer( return ensure_platform_int(indexer) def get_slice_bound( - self, label: Hashable | Sequence[Hashable], side: str, kind: str + self, label: Hashable | Sequence[Hashable], side: str, kind: str | None = None ) -> int: """ For an ordered MultiIndex, compute slice bound @@ -2729,7 +2729,7 @@ def get_slice_bound( ---------- label : object or tuple of objects side : {'left', 'right'} - kind : {'loc', 'getitem'} + kind : {'loc', 'getitem', None} Returns ------- @@ -2747,13 +2747,13 @@ def get_slice_bound( Get the locations from the leftmost 'b' in the first level until the end of the multiindex: - >>> mi.get_slice_bound('b', side="left", kind="loc") + >>> mi.get_slice_bound('b', side="left") 1 Like above, but if you get the locations from the rightmost 'b' in the first level and 'f' in the second level: - >>> mi.get_slice_bound(('b','f'), side="right", kind="loc") + >>> mi.get_slice_bound(('b','f'), side="right") 3 See Also @@ -2820,7 +2820,7 @@ def slice_locs(self, start=None, end=None, step=None, kind=None): """ # This function adds nothing to its parent implementation (the magic # happens in get_slice_bound method), but it adds meaningful doc. 
- return super().slice_locs(start, end, step, kind=kind) + return super().slice_locs(start, end, step) def _partial_tup_index(self, tup, side="left"): if len(tup) > self._lexsort_depth: @@ -3206,9 +3206,7 @@ def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes): # we have a partial slice (like looking up a partial date # string) - start = stop = level_index.slice_indexer( - key.start, key.stop, key.step, kind="loc" - ) + start = stop = level_index.slice_indexer(key.start, key.stop, key.step) step = start.step if isinstance(start, slice) or isinstance(stop, slice): diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 28f563764ef10..88b0b019324ea 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -112,8 +112,9 @@ def _validate_dtype(cls, dtype: Dtype | None) -> None: # Indexing Methods @doc(Index._maybe_cast_slice_bound) - def _maybe_cast_slice_bound(self, label, side: str, kind): - assert kind in ["loc", "getitem", None] + def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default): + assert kind in ["loc", "getitem", None, lib.no_default] + self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound") # we will try to coerce to integers return self._maybe_cast_indexer(label) @@ -346,7 +347,7 @@ def _convert_slice_indexer(self, key: slice, kind: str): # We always treat __getitem__ slicing as label-based # translate to locations - return self.slice_indexer(key.start, key.stop, key.step, kind=kind) + return self.slice_indexer(key.start, key.stop, key.step) # ---------------------------------------------------------------- diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 18e441ef165c9..136843938b683 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -531,7 +531,7 @@ def get_loc(self, key, method=None, tolerance=None): except KeyError as err: raise KeyError(orig_key) from err - def _maybe_cast_slice_bound(self, 
label, side: str, kind: str): + def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default): """ If label is a string or a datetime, cast it to Period.ordinal according to resolution. @@ -540,7 +540,7 @@ def _maybe_cast_slice_bound(self, label, side: str, kind: str): ---------- label : object side : {'left', 'right'} - kind : {'loc', 'getitem'} + kind : {'loc', 'getitem'}, or None Returns ------- @@ -551,7 +551,8 @@ def _maybe_cast_slice_bound(self, label, side: str, kind: str): Value of `side` parameter should be validated in caller. """ - assert kind in ["loc", "getitem"] + assert kind in ["loc", "getitem", None, lib.no_default] + self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound") if isinstance(label, datetime): return Period(label, freq=self.freq) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index a23dd10bc3c0e..ec97fa1e05851 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -192,7 +192,7 @@ def get_loc(self, key, method=None, tolerance=None): return Index.get_loc(self, key, method, tolerance) - def _maybe_cast_slice_bound(self, label, side: str, kind): + def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default): """ If label is a string, cast it to timedelta according to resolution. 
@@ -206,7 +206,8 @@ def _maybe_cast_slice_bound(self, label, side: str, kind): ------- label : object """ - assert kind in ["loc", "getitem", None] + assert kind in ["loc", "getitem", None, lib.no_default] + self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound") if isinstance(label, str): parsed = Timedelta(label) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 96aeda955df01..3d36387588e73 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1170,9 +1170,7 @@ def _get_slice_axis(self, slice_obj: slice, axis: int): return obj.copy(deep=False) labels = obj._get_axis(axis) - indexer = labels.slice_indexer( - slice_obj.start, slice_obj.stop, slice_obj.step, kind="loc" - ) + indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop, slice_obj.step) if isinstance(indexer, slice): return self.obj._slice(indexer, axis=axis) diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index 3da6414332cb8..2773543b74764 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -679,18 +679,18 @@ def test_maybe_cast_slice_bounds_empty(self): # GH#14354 empty_idx = date_range(freq="1H", periods=0, end="2015") - right = empty_idx._maybe_cast_slice_bound("2015-01-02", "right", "loc") + right = empty_idx._maybe_cast_slice_bound("2015-01-02", "right") exp = Timestamp("2015-01-02 23:59:59.999999999") assert right == exp - left = empty_idx._maybe_cast_slice_bound("2015-01-02", "left", "loc") + left = empty_idx._maybe_cast_slice_bound("2015-01-02", "left") exp = Timestamp("2015-01-02 00:00:00") assert left == exp def test_maybe_cast_slice_duplicate_monotonic(self): # https://github.com/pandas-dev/pandas/issues/16515 idx = DatetimeIndex(["2017", "2017"]) - result = idx._maybe_cast_slice_bound("2017-01-01", "left", "loc") + result = idx._maybe_cast_slice_bound("2017-01-01", "left") expected = Timestamp("2017-01-01") assert 
result == expected diff --git a/pandas/tests/indexes/period/test_partial_slicing.py b/pandas/tests/indexes/period/test_partial_slicing.py index b0e573250d02e..148999d90d554 100644 --- a/pandas/tests/indexes/period/test_partial_slicing.py +++ b/pandas/tests/indexes/period/test_partial_slicing.py @@ -110,9 +110,9 @@ def test_maybe_cast_slice_bound(self, make_range, frame_or_series): # Check the lower-level calls are raising where expected. with pytest.raises(TypeError, match=msg): - idx._maybe_cast_slice_bound("foo", "left", "loc") + idx._maybe_cast_slice_bound("foo", "left") with pytest.raises(TypeError, match=msg): - idx.get_slice_bound("foo", "left", "loc") + idx.get_slice_bound("foo", "left") with pytest.raises(TypeError, match=msg): obj["2013/09/30":"foo"] diff --git a/pandas/tests/indexes/test_indexing.py b/pandas/tests/indexes/test_indexing.py index 8d2637e4a06f6..379c766b94d6c 100644 --- a/pandas/tests/indexes/test_indexing.py +++ b/pandas/tests/indexes/test_indexing.py @@ -259,3 +259,16 @@ def test_getitem_deprecated_float(idx): expected = idx[1] assert result == expected + + +def test_maybe_cast_slice_bound_kind_deprecated(index): + if not len(index): + return + + with tm.assert_produces_warning(FutureWarning): + # passed as keyword + index._maybe_cast_slice_bound(index[0], "left", kind="loc") + + with tm.assert_produces_warning(FutureWarning): + # pass as positional + index._maybe_cast_slice_bound(index[0], "left", "loc")
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry When I tried just removing it that broke some dask tests.
https://api.github.com/repos/pandas-dev/pandas/pulls/41378
2021-05-08T04:29:48Z
2021-05-10T23:33:09Z
2021-05-10T23:33:09Z
2021-07-22T21:59:56Z
REF: document casting behavior in groupby
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 7796df98395a7..123b9e3350fda 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -539,9 +539,6 @@ def _transform_general(self, func: Callable, *args, **kwargs) -> Series: object.__setattr__(group, "name", name) res = func(group, *args, **kwargs) - if isinstance(res, (DataFrame, Series)): - res = res._values - results.append(klass(res, index=group.index)) # check for empty "results" to avoid concat ValueError @@ -1251,12 +1248,11 @@ def _wrap_applied_output_series( columns = key_index stacked_values = stacked_values.T + if stacked_values.dtype == object: + # We'll have the DataFrame constructor do inference + stacked_values = stacked_values.tolist() result = self.obj._constructor(stacked_values, index=index, columns=columns) - # if we have date/time like in the original, then coerce dates - # as we are stacking can easily have object dtypes here - result = result._convert(datetime=True) - if not self.as_index: self._insert_inaxis_grouper_inplace(result) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 2091d2fc484e1..0b07668a9fea2 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1329,7 +1329,10 @@ def _agg_py_fallback( # reductions; see GH#28949 ser = df.iloc[:, 0] - res_values = self.grouper.agg_series(ser, alt) + # We do not get here with UDFs, so we know that our dtype + # should always be preserved by the implemented aggregations + # TODO: Is this exactly right; see WrappedCythonOp get_result_dtype? 
+ res_values = self.grouper.agg_series(ser, alt, preserve_dtype=True) if isinstance(values, Categorical): # Because we only get here with known dtype-preserving diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 3045451974ee7..8b6136b3abc42 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -966,11 +966,24 @@ def _cython_operation( ) @final - def agg_series(self, obj: Series, func: F) -> ArrayLike: + def agg_series( + self, obj: Series, func: F, preserve_dtype: bool = False + ) -> ArrayLike: + """ + Parameters + ---------- + obj : Series + func : function taking a Series and returning a scalar-like + preserve_dtype : bool + Whether the aggregation is known to be dtype-preserving. + + Returns + ------- + np.ndarray or ExtensionArray + """ # test_groupby_empty_with_category gets here with self.ngroups == 0 # and len(obj) > 0 - cast_back = True if len(obj) == 0: # SeriesGrouper would raise if we were to call _aggregate_series_fast result = self._aggregate_series_pure_python(obj, func) @@ -982,17 +995,21 @@ def agg_series(self, obj: Series, func: F) -> ArrayLike: # TODO: can we get a performant workaround for EAs backed by ndarray? result = self._aggregate_series_pure_python(obj, func) + # we can preserve a little bit more aggressively with EA dtype + # because maybe_cast_pointwise_result will do a try/except + # with _from_sequence. NB we are assuming here that _from_sequence + # is sufficiently strict that it casts appropriately. + preserve_dtype = True + elif obj.index._has_complex_internals: # Preempt TypeError in _aggregate_series_fast result = self._aggregate_series_pure_python(obj, func) else: result = self._aggregate_series_fast(obj, func) - cast_back = False npvalues = lib.maybe_convert_objects(result, try_float=False) - if cast_back: - # TODO: Is there a documented reason why we dont always cast_back? 
+ if preserve_dtype: out = maybe_cast_pointwise_result(npvalues, obj.dtype, numeric_only=True) else: out = npvalues
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41376
2021-05-08T01:02:29Z
2021-05-17T19:18:11Z
2021-05-17T19:18:11Z
2021-05-17T19:25:12Z
REF: do less in Grouping.__init__
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index cb5b54ca0c598..d4d20809ada85 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -777,11 +777,7 @@ def apply_series_value_counts(): # multi-index components codes = self.grouper.reconstructed_codes codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)] - # error: List item 0 has incompatible type "Union[ndarray, Any]"; - # expected "Index" - levels = [ping.group_index for ping in self.grouper.groupings] + [ - lev # type: ignore[list-item] - ] + levels = [ping.group_index for ping in self.grouper.groupings] + [lev] names = self.grouper.names + [self.obj.name] if dropna: diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 1b5c11b363457..598750475f3e8 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -438,6 +438,9 @@ class Grouping: * groups : dict of {group -> label_list} """ + _codes: np.ndarray | None = None + _group_index: Index | None = None + def __init__( self, index: Index, @@ -461,6 +464,8 @@ def __init__( self.in_axis = in_axis self.dropna = dropna + self._passed_categorical = False + # right place for this? 
if isinstance(grouper, (Series, Index)) and name is None: self.name = grouper.name @@ -468,20 +473,16 @@ def __init__( # we have a single grouper which may be a myriad of things, # some of which are dependent on the passing in level - if level is not None: - if not isinstance(level, int): - if level not in index.names: - raise AssertionError(f"Level {level} not in index") - level = index.names.index(level) - + ilevel = self._ilevel + if ilevel is not None: if self.name is None: - self.name = index.names[level] + self.name = index.names[ilevel] ( - self.grouper, + self.grouper, # Index self._codes, self._group_index, - ) = index._get_grouper_for_level(self.grouper, level) + ) = index._get_grouper_for_level(self.grouper, ilevel) # a passed Grouper like, directly get the grouper in the same way # as single grouper groupby, use the group_info to get codes @@ -502,32 +503,13 @@ def __init__( self.grouper = grouper._get_grouper() else: - # a passed Categorical if is_categorical_dtype(self.grouper): + self._passed_categorical = True self.grouper, self.all_grouper = recode_for_groupby( self.grouper, self.sort, observed ) - categories = self.grouper.categories - - # we make a CategoricalIndex out of the cat grouper - # preserving the categories / ordered attributes - self._codes = self.grouper.codes - if observed: - codes = algorithms.unique1d(self.grouper.codes) - codes = codes[codes != -1] - if sort or self.grouper.ordered: - codes = np.sort(codes) - else: - codes = np.arange(len(categories)) - - self._group_index = CategoricalIndex( - Categorical.from_codes( - codes=codes, categories=categories, ordered=self.grouper.ordered - ), - name=self.name, - ) # we are done elif isinstance(self.grouper, Grouping): @@ -564,8 +546,20 @@ def __repr__(self) -> str: def __iter__(self): return iter(self.indices) - _codes: np.ndarray | None = None - _group_index: Index | None = None + @cache_readonly + def _ilevel(self) -> int | None: + """ + If necessary, converted index level name to 
index level position. + """ + level = self.level + if level is None: + return None + if not isinstance(level, int): + index = self.index + if level not in index.names: + raise AssertionError(f"Level {level} not in index") + return index.names.index(level) + return level @property def ngroups(self) -> int: @@ -582,6 +576,12 @@ def indices(self): @property def codes(self) -> np.ndarray: + if self._passed_categorical: + # we make a CategoricalIndex out of the cat grouper + # preserving the categories / ordered attributes + cat = self.grouper + return cat.codes + if self._codes is None: self._make_codes() # error: Incompatible return value type (got "Optional[ndarray]", @@ -592,12 +592,33 @@ def codes(self) -> np.ndarray: def result_index(self) -> Index: if self.all_grouper is not None: group_idx = self.group_index - assert isinstance(group_idx, CategoricalIndex) # set in __init__ + assert isinstance(group_idx, CategoricalIndex) return recode_from_groupby(self.all_grouper, self.sort, group_idx) return self.group_index - @property + @cache_readonly def group_index(self) -> Index: + if self._passed_categorical: + # we make a CategoricalIndex out of the cat grouper + # preserving the categories / ordered attributes + cat = self.grouper + categories = cat.categories + + if self.observed: + codes = algorithms.unique1d(cat.codes) + codes = codes[codes != -1] + if self.sort or cat.ordered: + codes = np.sort(codes) + else: + codes = np.arange(len(categories)) + + return CategoricalIndex( + Categorical.from_codes( + codes=codes, categories=categories, ordered=cat.ordered + ), + name=self.name, + ) + if self._group_index is None: self._make_codes() assert self._group_index is not None
making it incrementally easier to reason about what `self.grouper` is
https://api.github.com/repos/pandas-dev/pandas/pulls/41375
2021-05-07T21:53:45Z
2021-05-17T14:25:50Z
2021-05-17T14:25:50Z
2021-05-17T14:41:07Z
BUG: loc casting to float for scalar with MultiIndex df
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 258e391b9220c..9b96c6b95301d 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -872,6 +872,7 @@ Indexing - Bug in :meth:`DataFrame.__setitem__` and :meth:`DataFrame.iloc.__setitem__` raising ``ValueError`` when trying to index with a row-slice and setting a list as values (:issue:`40440`) - Bug in :meth:`DataFrame.loc` not raising ``KeyError`` when key was not found in :class:`MultiIndex` when levels contain more values than used (:issue:`41170`) - Bug in :meth:`DataFrame.loc.__setitem__` when setting-with-expansion incorrectly raising when the index in the expanding axis contains duplicates (:issue:`40096`) +- Bug in :meth:`DataFrame.loc.__getitem__` with :class:`MultiIndex` casting to float when at least one column is from has float dtype and we retrieve a scalar (:issue:`41369`) - Bug in :meth:`DataFrame.loc` incorrectly matching non-boolean index elements (:issue:`20432`) - Bug in :meth:`Series.__delitem__` with ``ExtensionDtype`` incorrectly casting to ``ndarray`` (:issue:`40386`) - Bug in :meth:`DataFrame.__setitem__` raising ``TypeError`` when using a str subclass as the column name with a :class:`DatetimeIndex` (:issue:`37366`) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 0a06dff790cbf..be5b89f08b5ca 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -886,26 +886,22 @@ def _getitem_nested_tuple(self, tup: tuple): # handle the multi-axis by taking sections and reducing # this is iterative obj = self.obj - axis = 0 - for key in tup: + # GH#41369 Loop in reverse order ensures indexing along columns before rows + # which selects only necessary blocks which avoids dtype conversion if possible + axis = len(tup) - 1 + for key in tup[::-1]: if com.is_null_slice(key): - axis += 1 + axis -= 1 continue - current_ndim = obj.ndim obj = getattr(obj, self.name)._getitem_axis(key, axis=axis) - axis += 1 + axis -= 1 
# if we have a scalar, we are done if is_scalar(obj) or not hasattr(obj, "ndim"): break - # has the dim of the obj changed? - # GH 7199 - if obj.ndim < current_ndim: - axis -= 1 - return obj def _convert_to_indexer(self, key, axis: int, is_setter: bool = False): diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py index 558270ac86532..f9e2d1280b33a 100644 --- a/pandas/tests/indexing/multiindex/test_loc.py +++ b/pandas/tests/indexing/multiindex/test_loc.py @@ -831,3 +831,16 @@ def test_mi_add_cell_missing_row_non_unique(): columns=MultiIndex.from_product([[1, 2], ["A", "B"]]), ) tm.assert_frame_equal(result, expected) + + +def test_loc_get_scalar_casting_to_float(): + # GH#41369 + df = DataFrame( + {"a": 1.0, "b": 2}, index=MultiIndex.from_arrays([[3], [4]], names=["c", "d"]) + ) + result = df.loc[(3, 4), "b"] + assert result == 2 + assert isinstance(result, np.int64) + result = df.loc[[(3, 4)], "b"].iloc[0] + assert result == 2 + assert isinstance(result, np.int64)
- [x] closes #41369 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry In theory this would solve the issue, but I am not sure if this is desirable. We cast the row to a series, which forces the dtype conversion. If we loop in reverse we retrieve a column as a series which would avoid the conversion. Would you mind having a look @jbrockmendel ?
https://api.github.com/repos/pandas-dev/pandas/pulls/41374
2021-05-07T21:53:12Z
2021-05-25T12:28:55Z
2021-05-25T12:28:54Z
2021-06-03T22:08:41Z
ENH: retain masked EA dtypes in groupby with as_index=False
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 08f30f467dfa7..c0f05108ff464 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -29,6 +29,7 @@ enhancement2 Other enhancements ^^^^^^^^^^^^^^^^^^ +- :class:`DataFrameGroupBy` operations with ``as_index=False`` now correctly retain ``ExtensionDtype`` dtypes for columns being grouped on (:issue:`41373`) - Add support for assigning values to ``by`` argument in :meth:`DataFrame.plot.hist` and :meth:`DataFrame.plot.box` (:issue:`15079`) - :meth:`Series.sample`, :meth:`DataFrame.sample`, and :meth:`.GroupBy.sample` now accept a ``np.random.Generator`` as input to ``random_state``. A generator will be more performant, especially with ``replace=False`` (:issue:`38100`) - Additional options added to :meth:`.Styler.bar` to control alignment and display, with keyword only arguments (:issue:`26070`, :issue:`36419`) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index a6be85bf2be2a..59c57cf4a1ea0 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1033,7 +1033,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) self._insert_inaxis_grouper_inplace(result) result.index = Index(range(len(result))) - return result._convert(datetime=True) + return result agg = aggregate @@ -1684,6 +1684,8 @@ def _wrap_agged_manager(self, mgr: Manager2D) -> DataFrame: if self.axis == 1: result = result.T + # Note: we only need to pass datetime=True in order to get numeric + # values converted return self._reindex_output(result)._convert(datetime=True) def _iterate_column_groupbys(self, obj: FrameOrSeries): diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 3307558deec33..76815d780a1ad 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -619,11 +619,20 @@ def group_arraylike(self) -> ArrayLike: Analogous to result_index, but 
holding an ArrayLike to ensure we can can retain ExtensionDtypes. """ + if self._group_index is not None: + # _group_index is set in __init__ for MultiIndex cases + return self._group_index._values + + elif self._all_grouper is not None: + # retain dtype for categories, including unobserved ones + return self.result_index._values + return self._codes_and_uniques[1] @cache_readonly def result_index(self) -> Index: - # TODO: what's the difference between result_index vs group_index? + # result_index retains dtype for categories, including unobserved ones, + # which group_index does not if self._all_grouper is not None: group_idx = self.group_index assert isinstance(group_idx, CategoricalIndex) @@ -635,7 +644,8 @@ def group_index(self) -> Index: if self._group_index is not None: # _group_index is set in __init__ for MultiIndex cases return self._group_index - uniques = self.group_arraylike + + uniques = self._codes_and_uniques[1] return Index(uniques, name=self.name) @cache_readonly diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 36fbda5974ea0..6d8881d12dbb7 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -885,6 +885,7 @@ def result_arraylike(self) -> ArrayLike: if len(self.groupings) == 1: return self.groupings[0].group_arraylike + # result_index is MultiIndex return self.result_index._values @cache_readonly @@ -903,12 +904,12 @@ def get_group_levels(self) -> list[ArrayLike]: # Note: only called from _insert_inaxis_grouper_inplace, which # is only called for BaseGrouper, never for BinGrouper if len(self.groupings) == 1: - return [self.groupings[0].result_index] + return [self.groupings[0].group_arraylike] name_list = [] for ping, codes in zip(self.groupings, self.reconstructed_codes): codes = ensure_platform_int(codes) - levels = ping.result_index.take(codes) + levels = ping.group_arraylike.take(codes) name_list.append(levels) diff --git a/pandas/tests/extension/base/groupby.py 
b/pandas/tests/extension/base/groupby.py index 1a045fa33f487..4c8dc6ca1ad9c 100644 --- a/pandas/tests/extension/base/groupby.py +++ b/pandas/tests/extension/base/groupby.py @@ -22,14 +22,14 @@ def test_grouping_grouper(self, data_for_grouping): def test_groupby_extension_agg(self, as_index, data_for_grouping): df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping}) result = df.groupby("B", as_index=as_index).A.mean() - _, index = pd.factorize(data_for_grouping, sort=True) + _, uniques = pd.factorize(data_for_grouping, sort=True) - index = pd.Index(index, name="B") - expected = pd.Series([3.0, 1.0, 4.0], index=index, name="A") if as_index: + index = pd.Index(uniques, name="B") + expected = pd.Series([3.0, 1.0, 4.0], index=index, name="A") self.assert_series_equal(result, expected) else: - expected = expected.reset_index() + expected = pd.DataFrame({"B": uniques, "A": [3.0, 1.0, 4.0]}) self.assert_frame_equal(result, expected) def test_groupby_agg_extension(self, data_for_grouping): diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py index b8fa158083327..b5bb68e8a9a12 100644 --- a/pandas/tests/extension/json/test_json.py +++ b/pandas/tests/extension/json/test_json.py @@ -312,10 +312,6 @@ def test_groupby_extension_apply(self): we'll be able to dispatch unique. 
""" - @pytest.mark.parametrize("as_index", [True, False]) - def test_groupby_extension_agg(self, as_index, data_for_grouping): - super().test_groupby_extension_agg(as_index, data_for_grouping) - @pytest.mark.xfail(reason="GH#39098: Converts agg result to object") def test_groupby_agg_extension(self, data_for_grouping): super().test_groupby_agg_extension(data_for_grouping) diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py index 172137ff3a5a2..395540993dc15 100644 --- a/pandas/tests/extension/test_boolean.py +++ b/pandas/tests/extension/test_boolean.py @@ -269,14 +269,14 @@ def test_grouping_grouper(self, data_for_grouping): def test_groupby_extension_agg(self, as_index, data_for_grouping): df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": data_for_grouping}) result = df.groupby("B", as_index=as_index).A.mean() - _, index = pd.factorize(data_for_grouping, sort=True) + _, uniques = pd.factorize(data_for_grouping, sort=True) - index = pd.Index(index, name="B") - expected = pd.Series([3.0, 1.0], index=index, name="A") if as_index: + index = pd.Index(uniques, name="B") + expected = pd.Series([3.0, 1.0], index=index, name="A") self.assert_series_equal(result, expected) else: - expected = expected.reset_index() + expected = pd.DataFrame({"B": uniques, "A": [3.0, 1.0]}) self.assert_frame_equal(result, expected) def test_groupby_agg_extension(self, data_for_grouping): diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index bcdb6817c0321..538a707aa3580 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -717,6 +717,10 @@ def test_ops_not_as_index(reduction_func): expected = expected.rename("size") expected = expected.reset_index() + if reduction_func != "size": + # 32 bit compat -> groupby preserves dtype whereas reset_index casts to int64 + expected["a"] = expected["a"].astype(df["a"].dtype) + g = df.groupby("a", as_index=False) result = getattr(g, 
reduction_func)()
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41373
2021-05-07T20:27:00Z
2021-07-25T14:23:16Z
2021-07-25T14:23:16Z
2021-07-25T14:42:04Z
[ArrowStringArray] REF: str.extract dispatch to array
diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi index 9dbc47f1d40f7..902d1b7245b46 100644 --- a/pandas/_libs/lib.pyi +++ b/pandas/_libs/lib.pyi @@ -196,6 +196,7 @@ def map_infer_mask( convert: bool = ..., na_value: Any = ..., dtype: np.dtype = ..., + out: np.ndarray = ..., ) -> np.ndarray: ... def indices_fast( diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index e1cb744c7033c..58bd6cdc54b7d 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -2536,7 +2536,8 @@ no_default = NoDefault.no_default # Sentinel indicating the default value. @cython.boundscheck(False) @cython.wraparound(False) def map_infer_mask(ndarray arr, object f, const uint8_t[:] mask, bint convert=True, - object na_value=no_default, cnp.dtype dtype=np.dtype(object) + object na_value=no_default, cnp.dtype dtype=np.dtype(object), + ndarray out=None ) -> np.ndarray: """ Substitute for np.vectorize with pandas-friendly dtype inference. @@ -2554,6 +2555,8 @@ def map_infer_mask(ndarray arr, object f, const uint8_t[:] mask, bint convert=Tr input value is used dtype : numpy.dtype The numpy dtype to use for the result ndarray. + out : ndarray + The result. 
Returns ------- @@ -2565,7 +2568,10 @@ def map_infer_mask(ndarray arr, object f, const uint8_t[:] mask, bint convert=Tr object val n = len(arr) - result = np.empty(n, dtype=dtype) + if out is not None: + result = out + else: + result = np.empty(n, dtype=dtype) for i in range(n): if mask[i]: if na_value is no_default: diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index cb8a08f5668ac..95d9409b265ce 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2453,7 +2453,9 @@ def replace(self, to_replace, value, inplace: bool = False): # ------------------------------------------------------------------------ # String methods interface - def _str_map(self, f, na_value=np.nan, dtype=np.dtype("object")): + def _str_map( + self, f, na_value=np.nan, dtype=np.dtype("object"), convert: bool = True + ): # Optimization to apply the callable `f` to the categories once # and rebuild the result by `take`ing from the result with the codes. # Returns the same type as the object-dtype implementation though. 
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 74ca5130ca322..ab1dadf4d2dfa 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -410,7 +410,9 @@ def _cmp_method(self, other, op): # String methods interface _str_na_value = StringDtype.na_value - def _str_map(self, f, na_value=None, dtype: Dtype | None = None): + def _str_map( + self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True + ): from pandas.arrays import BooleanArray if dtype is None: diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index d5ee28eb7017e..1e2f382b2976f 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -741,7 +741,9 @@ def value_counts(self, dropna: bool = True) -> Series: _str_na_value = ArrowStringDtype.na_value - def _str_map(self, f, na_value=None, dtype: Dtype | None = None): + def _str_map( + self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True + ): # TODO: de-duplicate with StringArray method. This method is moreless copy and # paste. 
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index 43df34a7ecbb2..a56c62074cc2a 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -15,10 +15,7 @@ import numpy as np import pandas._libs.lib as lib -from pandas._typing import ( - ArrayLike, - FrameOrSeriesUnion, -) +from pandas._typing import FrameOrSeriesUnion from pandas.util._decorators import Appender from pandas.core.dtypes.common import ( @@ -162,7 +159,6 @@ class StringMethods(NoNewAttributesMixin): # TODO: Dispatch all the methods # Currently the following are not dispatched to the array # * cat - # * extract # * extractall def __init__(self, data): @@ -245,7 +241,7 @@ def _wrap_result( self, result, name=None, - expand=None, + expand: Optional[bool] = None, fill_value=np.nan, returns_string=True, ): @@ -2383,8 +2379,36 @@ def extract( if not expand and regex.groups > 1 and isinstance(self._data, ABCIndex): raise ValueError("only one regex group is supported with Index") - # TODO: dispatch - return str_extract(self, pat, flags, expand=expand) + result = self._data.array._str_extract(pat, flags, expand) + returns_df = regex.groups > 1 or expand + + name = _get_group_names(regex) if returns_df else _get_single_group_name(regex) + + # extract is inconsistent for Indexes when expand is True. 
To avoid special + # casing _wrap_result we handle that case here + if expand and isinstance(self._data, ABCIndex): + from pandas import DataFrame + + # if expand is True, name is a list of column names + assert isinstance(name, list) # for mypy + return DataFrame(result, columns=name, dtype=object) + + # bypass padding code in _wrap_result + expand_kwarg: Optional[bool] + if returns_df: + if is_object_dtype(result): + if regex.groups == 1: + result = result.reshape(1, -1).T + if result.size == 0: + expand_kwarg = True + else: + expand_kwarg = None + else: + expand_kwarg = True + else: + expand_kwarg = False + + return self._wrap_result(result, name=name, expand=expand_kwarg) @forbid_nonstring_types(["bytes"]) def extractall(self, pat, flags=0): @@ -3071,72 +3095,6 @@ def _get_group_names(regex: Pattern) -> List[Hashable]: return [names.get(1 + i, i) for i in range(regex.groups)] -def _str_extract(arr: ArrayLike, pat: str, flags=0, expand: bool = True): - """ - Find groups in each string in the array using passed regular expression. 
- - Returns - ------- - np.ndarray or list of lists is expand is True - """ - regex = re.compile(pat, flags=flags) - - empty_row = [np.nan] * regex.groups - - def f(x): - if not isinstance(x, str): - return empty_row - m = regex.search(x) - if m: - return [np.nan if item is None else item for item in m.groups()] - else: - return empty_row - - if expand: - return [f(val) for val in np.asarray(arr)] - - return np.array([f(val)[0] for val in np.asarray(arr)], dtype=object) - - -def str_extract(accessor: StringMethods, pat: str, flags: int = 0, expand: bool = True): - from pandas import ( - DataFrame, - array as pd_array, - ) - - obj = accessor._data - result_dtype = _result_dtype(obj) - regex = re.compile(pat, flags=flags) - returns_df = regex.groups > 1 or expand - - if returns_df: - name = None - columns = _get_group_names(regex) - - if obj.array.size == 0: - result = DataFrame(columns=columns, dtype=result_dtype) - - else: - result_list = _str_extract(obj.array, pat, flags=flags, expand=returns_df) - - result_index: Optional["Index"] - if isinstance(obj, ABCSeries): - result_index = obj.index - else: - result_index = None - - result = DataFrame( - result_list, columns=columns, index=result_index, dtype=result_dtype - ) - - else: - name = _get_single_group_name(regex) - result_arr = _str_extract(obj.array, pat, flags=flags, expand=returns_df) - # not dispatching, so we have to reconstruct here. - result = pd_array(result_arr, dtype=result_dtype) - return accessor._wrap_result(result, name=name) - - def str_extractall(arr, pat, flags=0): regex = re.compile(pat, flags=flags) # the regex must contain capture groups. 
diff --git a/pandas/core/strings/base.py b/pandas/core/strings/base.py index a77f8861a7c02..9cd0f25c1055a 100644 --- a/pandas/core/strings/base.py +++ b/pandas/core/strings/base.py @@ -222,3 +222,7 @@ def _str_split(self, pat=None, n=-1, expand=False): @abc.abstractmethod def _str_rsplit(self, pat=None, n=-1): pass + + @abc.abstractmethod + def _str_extract(self, pat: str, flags: int = 0, expand: bool = True): + pass diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py index 869eabc76b555..0f54d5f869973 100644 --- a/pandas/core/strings/object_array.py +++ b/pandas/core/strings/object_array.py @@ -19,6 +19,7 @@ ) from pandas.core.dtypes.common import ( + is_object_dtype, is_re, is_scalar, ) @@ -38,7 +39,9 @@ def __len__(self): # For typing, _str_map relies on the object being sized. raise NotImplementedError - def _str_map(self, f, na_value=None, dtype: Optional[Dtype] = None): + def _str_map( + self, f, na_value=None, dtype: Optional[Dtype] = None, convert: bool = True + ): """ Map a callable over valid element of the array. @@ -53,6 +56,8 @@ def _str_map(self, f, na_value=None, dtype: Optional[Dtype] = None): for object-dtype and Categorical and ``pd.NA`` for StringArray. dtype : Dtype, optional The dtype of the result array. + convert : bool, default True + Whether to call `maybe_convert_objects` on the resulting ndarray """ if dtype is None: dtype = np.dtype("object") @@ -66,9 +71,9 @@ def _str_map(self, f, na_value=None, dtype: Optional[Dtype] = None): arr = np.asarray(self, dtype=object) mask = isna(arr) - convert = not np.all(mask) + map_convert = convert and not np.all(mask) try: - result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert) + result = lib.map_infer_mask(arr, f, mask.view(np.uint8), map_convert) except (TypeError, AttributeError) as e: # Reraise the exception if callable `f` got wrong number of args. 
# The user may want to be warned by this, instead of getting NaN @@ -94,7 +99,7 @@ def g(x): return result if na_value is not np.nan: np.putmask(result, mask, na_value) - if result.dtype == object: + if convert and result.dtype == object: result = lib.maybe_convert_objects(result) return result @@ -408,3 +413,51 @@ def _str_lstrip(self, to_strip=None): def _str_rstrip(self, to_strip=None): return self._str_map(lambda x: x.rstrip(to_strip)) + + def _str_extract(self, pat: str, flags: int = 0, expand: bool = True): + regex = re.compile(pat, flags=flags) + na_value = self._str_na_value + + if regex.groups == 1: + + def f(x): + m = regex.search(x) + return m.groups()[0] if m else na_value + + return self._str_map(f, convert=False) + else: + out = np.empty((len(self), regex.groups), dtype=object) + + if is_object_dtype(self): + + def f(x): + if not isinstance(x, str): + return na_value + m = regex.search(x) + if m: + return [ + na_value if item is None else item for item in m.groups() + ] + else: + return na_value + + else: + + def f(x): + m = regex.search(x) + if m: + return [ + na_value if item is None else item for item in m.groups() + ] + else: + return na_value + + result = lib.map_infer_mask( + np.asarray(self), + f, + mask=isna(self).view("uint8"), + convert=False, + na_value=na_value, + out=out, + ) + return result
``` before after ratio [170b4391] [62cc9c38] <master> <_str_extract> - 138±0.4ms 117±2ms 0.85 strings.Methods.time_extract('string') - 14.4±0.6ms 11.9±0.06ms 0.82 strings.Methods.time_isdigit('str') - 109±0.4ms 81.1±0.9ms 0.74 strings.Split.time_split(True) - 82.3±0.3ms 46.3±0.2ms 0.56 strings.Split.time_rsplit(True) - 50.0±0.4ms 27.3±0.4ms 0.55 strings.Methods.time_partition('string') - 49.0±0.2ms 26.4±0.4ms 0.54 strings.Methods.time_rpartition('string') - 55.3±0.5ms 28.8±0.3ms 0.52 strings.Methods.time_partition('str') - 54.8±0.4ms 27.8±0.2ms 0.51 strings.Methods.time_rpartition('str') SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY. PERFORMANCE INCREASED. ``` marked as draft since it may be better to split the perf improvements from the dispatch to array refactor and also may be better to finish #41085 first. also need to complete the parameterisation of the extract tests. also maybe able to reduce the _wrap_result changes by passing a 2d array without expand kwarg so that expand is inferred and the code removed here will be bypassed. so will look into that before marking as ready for review. if I can reduce the diff here (even if only by doing the test parameterisation as a precursor), then may be worth adding the pyarrow native implementation here also.
https://api.github.com/repos/pandas-dev/pandas/pulls/41372
2021-05-07T16:35:31Z
2021-05-24T19:38:28Z
null
2021-05-24T19:38:29Z
CLN: Remove raise if missing only controlling the error message
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a1b46c42f6d68..9a57d86d62fdc 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3412,7 +3412,7 @@ def __getitem__(self, key): else: if is_iterator(key): key = list(key) - indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1] + indexer = self.loc._get_listlike_indexer(key, axis=1)[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index b267472eba573..96aeda955df01 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -11,8 +11,6 @@ import numpy as np -from pandas._config.config import option_context - from pandas._libs.indexing import NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim from pandas.errors import ( @@ -1089,7 +1087,7 @@ def _getitem_iterable(self, key, axis: int): self._validate_key(key, axis) # A collection of keys - keyarr, indexer = self._get_listlike_indexer(key, axis, raise_missing=False) + keyarr, indexer = self._get_listlike_indexer(key, axis) return self.obj._reindex_with_indexers( {axis: [keyarr, indexer]}, copy=True, allow_dups=True ) @@ -1255,8 +1253,7 @@ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False): (inds,) = key.nonzero() return inds else: - # When setting, missing keys are not allowed, even with .loc: - return self._get_listlike_indexer(key, axis, raise_missing=True)[1] + return self._get_listlike_indexer(key, axis)[1] else: try: return labels.get_loc(key) @@ -1266,7 +1263,7 @@ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False): return {"key": key} raise - def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False): + def _get_listlike_indexer(self, key, axis: int): """ Transform a list-like of keys into a new index and an indexer. @@ -1276,16 +1273,11 @@ def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False): Targeted labels. 
axis: int Dimension on which the indexing is being made. - raise_missing: bool, default False - Whether to raise a KeyError if some labels were not found. - Will be removed in the future, and then this method will always behave as - if ``raise_missing=True``. Raises ------ KeyError - If at least one key was requested but none was found, and - raise_missing=True. + If at least one key was requested but none was found. Returns ------- @@ -1310,12 +1302,10 @@ def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False): else: keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr) - self._validate_read_indexer(keyarr, indexer, axis, raise_missing=raise_missing) + self._validate_read_indexer(keyarr, indexer, axis) return keyarr, indexer - def _validate_read_indexer( - self, key, indexer, axis: int, raise_missing: bool = False - ): + def _validate_read_indexer(self, key, indexer, axis: int): """ Check that indexer can be used to return a result. @@ -1331,16 +1321,11 @@ def _validate_read_indexer( (with -1 indicating not found). axis : int Dimension on which the indexing is being made. - raise_missing: bool - Whether to raise a KeyError if some labels are not found. Will be - removed in the future, and then this method will always behave as - if raise_missing=True. Raises ------ KeyError - If at least one key was requested but none was found, and - raise_missing=True. + If at least one key was requested but none was found. """ if len(key) == 0: return @@ -1356,21 +1341,8 @@ def _validate_read_indexer( ax = self.obj._get_axis(axis) - # We (temporarily) allow for some missing keys with .loc, except in - # some cases (e.g. 
setting) in which "raise_missing" will be False - if raise_missing: - not_found = list(set(key) - set(ax)) - raise KeyError(f"{not_found} not in index") - - not_found = key[missing_mask] - - with option_context("display.max_seq_items", 10, "display.width", 80): - raise KeyError( - "Passing list-likes to .loc or [] with any missing labels " - "is no longer supported. " - f"The following labels were missing: {not_found}. " - "See https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike" # noqa:E501 - ) + not_found = list(set(key) - set(ax)) + raise KeyError(f"{not_found} not in index") @doc(IndexingMixin.iloc) diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py index 11943d353e8c8..cd49620f45fae 100644 --- a/pandas/tests/indexing/test_categorical.py +++ b/pandas/tests/indexing/test_categorical.py @@ -296,12 +296,7 @@ def test_loc_getitem_listlike_labels(self): def test_loc_getitem_listlike_unused_category(self): # GH#37901 a label that is in index.categories but not in index # listlike containing an element in the categories but not in the values - msg = ( - "The following labels were missing: CategoricalIndex(['e'], " - "categories=['c', 'a', 'b', 'e'], ordered=False, name='B', " - "dtype='category')" - ) - with pytest.raises(KeyError, match=re.escape(msg)): + with pytest.raises(KeyError, match=re.escape("['e'] not in index")): self.df2.loc[["a", "b", "e"]] def test_loc_getitem_label_unused_category(self): @@ -311,10 +306,7 @@ def test_loc_getitem_label_unused_category(self): def test_loc_getitem_non_category(self): # not all labels in the categories - msg = ( - "The following labels were missing: Index(['d'], dtype='object', name='B')" - ) - with pytest.raises(KeyError, match=re.escape(msg)): + with pytest.raises(KeyError, match=re.escape("['d'] not in index")): self.df2.loc[["a", "d"]] def test_loc_setitem_expansion_label_unused_category(self): @@ -346,8 +338,7 @@ def 
test_loc_listlike_dtypes(self): exp = DataFrame({"A": [1, 1, 2], "B": [4, 4, 5]}, index=exp_index) tm.assert_frame_equal(res, exp, check_index_type=True) - msg = "The following labels were missing: Index(['x'], dtype='object')" - with pytest.raises(KeyError, match=re.escape(msg)): + with pytest.raises(KeyError, match=re.escape("['x'] not in index")): df.loc[["a", "x"]] def test_loc_listlike_dtypes_duplicated_categories_and_codes(self): @@ -370,8 +361,7 @@ def test_loc_listlike_dtypes_duplicated_categories_and_codes(self): ) tm.assert_frame_equal(res, exp, check_index_type=True) - msg = "The following labels were missing: Index(['x'], dtype='object')" - with pytest.raises(KeyError, match=re.escape(msg)): + with pytest.raises(KeyError, match=re.escape("['x'] not in index")): df.loc[["a", "x"]] def test_loc_listlike_dtypes_unused_category(self): @@ -394,11 +384,10 @@ def test_loc_listlike_dtypes_unused_category(self): ) tm.assert_frame_equal(res, exp, check_index_type=True) - msg = "The following labels were missing: Index(['x'], dtype='object')" - with pytest.raises(KeyError, match=re.escape(msg)): + with pytest.raises(KeyError, match=re.escape("['x'] not in index")): df.loc[["a", "x"]] - def test_loc_getitem_listlike_unused_category_raises_keyerro(self): + def test_loc_getitem_listlike_unused_category_raises_keyerror(self): # key that is an *unused* category raises index = CategoricalIndex(["a", "b", "a", "c"], categories=list("abcde")) df = DataFrame({"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]}, index=index) @@ -407,13 +396,7 @@ def test_loc_getitem_listlike_unused_category_raises_keyerro(self): # For comparison, check the scalar behavior df.loc["e"] - msg = ( - "Passing list-likes to .loc or [] with any missing labels is no " - "longer supported. The following labels were missing: " - "CategoricalIndex(['e'], categories=['a', 'b', 'c', 'd', 'e'], " - "ordered=False, dtype='category'). 
See https" - ) - with pytest.raises(KeyError, match=re.escape(msg)): + with pytest.raises(KeyError, match=re.escape("['e'] not in index")): df.loc[["a", "e"]] def test_ix_categorical_index(self): diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py index a84be049ebff4..6116c34f238e2 100644 --- a/pandas/tests/indexing/test_floats.py +++ b/pandas/tests/indexing/test_floats.py @@ -535,10 +535,10 @@ def test_floating_misc(self, indexer_sl): result2 = s.iloc[[0, 2, 4]] tm.assert_series_equal(result1, result2) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): indexer_sl(s)[[1.6, 5, 10]] - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): indexer_sl(s)[[0, 1, 2]] result = indexer_sl(s)[[2.5, 5]] diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index 446b616111e9e..1f50dacc4dffd 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -800,7 +800,7 @@ def test_iloc_non_unique_indexing(self): df2 = DataFrame({"A": [0.1] * 1000, "B": [1] * 1000}) df2 = concat([df2, 2 * df2, 3 * df2]) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): df2.loc[idx] def test_iloc_empty_list_indexer_is_ok(self): diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index df688d6745096..0c20622311e1f 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -247,12 +247,12 @@ def test_dups_fancy_indexing_not_in_order(self): tm.assert_frame_equal(result, expected) rows = ["C", "B", "E"] - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): df.loc[rows] # see GH5553, make sure we use the right indexer rows = ["F", "G", "H", "C", "B", "E"] - with 
pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): df.loc[rows] def test_dups_fancy_indexing_only_missing_label(self): @@ -274,14 +274,14 @@ def test_dups_fancy_indexing_missing_label(self, vals): # GH 4619; duplicate indexer with missing label df = DataFrame({"A": vals}) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): df.loc[[0, 8, 0]] def test_dups_fancy_indexing_non_unique(self): # non unique with non unique selector df = DataFrame({"test": [5, 7, 9, 11]}, index=["A", "A", "B", "C"]) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): df.loc[["A", "A", "E"]] def test_dups_fancy_indexing2(self): @@ -289,7 +289,7 @@ def test_dups_fancy_indexing2(self): # dups on index and missing values df = DataFrame(np.random.randn(5, 5), columns=["A", "B", "B", "B", "A"]) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): df.loc[:, ["A", "B", "C"]] def test_dups_fancy_indexing3(self): diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 11391efde4956..382ea8c382824 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -293,11 +293,11 @@ def test_getitem_label_list_with_missing(self): s = Series(range(3), index=["a", "b", "c"]) # consistency - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): s[["a", "d"]] s = Series(range(3)) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): s[[0, 3]] @pytest.mark.parametrize("index", [[True, False], [True, False, True, False]]) @@ -349,7 +349,7 @@ def test_loc_to_fail(self): s.loc[["4"]] s.loc[-1] = 3 - with pytest.raises(KeyError, match="with any missing labels"): + with 
pytest.raises(KeyError, match="not in index"): s.loc[[-1, -2]] s["a"] = 2 @@ -396,7 +396,7 @@ def test_loc_getitem_list_with_fail(self): s.loc[[3]] # a non-match and a match - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): s.loc[[2, 3]] def test_loc_index(self): @@ -2249,12 +2249,7 @@ def test_loc_getitem_list_of_labels_categoricalindex_with_na(self, box): ser2 = ser[:-1] ci2 = ci[1:] # but if there are no NAs present, this should raise KeyError - msg = ( - r"Passing list-likes to .loc or \[\] with any missing labels is no " - "longer supported. The following labels were missing: " - r"(Categorical)?Index\(\[nan\], .*\). " - "See https" - ) + msg = "not in index" with pytest.raises(KeyError, match=msg): ser2.loc[box(ci2)] @@ -2264,41 +2259,13 @@ def test_loc_getitem_list_of_labels_categoricalindex_with_na(self, box): with pytest.raises(KeyError, match=msg): ser2.to_frame().loc[box(ci2)] - def test_loc_getitem_many_missing_labels_inside_error_message_limited(self): - # GH#34272 - n = 10000 - missing_labels = [f"missing_{label}" for label in range(n)] - ser = Series({"a": 1, "b": 2, "c": 3}) - # regex checks labels between 4 and 9995 are replaced with ellipses - error_message_regex = "missing_4.*\\.\\.\\..*missing_9995" - with pytest.raises(KeyError, match=error_message_regex): - ser.loc[["a", "c"] + missing_labels] - - def test_loc_getitem_missing_labels_inside_matched_in_error_message(self): - # GH#34272 - ser = Series({"a": 1, "b": 2, "c": 3}) - error_message_regex = "missing_0.*missing_1.*missing_2" - with pytest.raises(KeyError, match=error_message_regex): - ser.loc[["a", "b", "missing_0", "c", "missing_1", "missing_2"]] - - def test_loc_getitem_long_text_missing_labels_inside_error_message_limited(self): - # GH#34272 - ser = Series({"a": 1, "b": 2, "c": 3}) - missing_labels = [f"long_missing_label_text_{i}" * 5 for i in range(3)] - # regex checks for very long labels there are new lines 
between each - error_message_regex = ( - "long_missing_label_text_0.*\\\\n.*long_missing_label_text_1" - ) - with pytest.raises(KeyError, match=error_message_regex): - ser.loc[["a", "c"] + missing_labels] - def test_loc_getitem_series_label_list_missing_values(self): # gh-11428 key = np.array( ["2001-01-04", "2001-01-02", "2001-01-04", "2001-01-14"], dtype="datetime64" ) ser = Series([2, 5, 8, 11], date_range("2001-01-01", freq="D", periods=4)) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): ser.loc[key] def test_loc_getitem_series_label_list_missing_integer_values(self): @@ -2307,7 +2274,7 @@ def test_loc_getitem_series_label_list_missing_integer_values(self): index=np.array([9730701000001104, 10049011000001109]), data=np.array([999000011000001104, 999000011000001104]), ) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): ser.loc[np.array([9730701000001104, 10047311000001102])] @pytest.mark.parametrize("to_period", [True, False]) @@ -2349,7 +2316,7 @@ def test_loc_getitem_listlike_of_datetimelike_keys(self, to_period): if to_period: keys = [x.to_period("D") for x in keys] - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): ser.loc[keys] diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py index b8680cc4e611e..dd26a978fe81d 100644 --- a/pandas/tests/indexing/test_partial.py +++ b/pandas/tests/indexing/test_partial.py @@ -199,14 +199,14 @@ def test_series_partial_set(self): # loc equiv to .reindex expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3]) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match=r"not in index"): ser.loc[[3, 2, 3]] result = ser.reindex([3, 2, 3]) tm.assert_series_equal(result, expected, check_index_type=True) expected = Series([np.nan, 0.2, np.nan, 
np.nan], index=[3, 2, 3, "x"]) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): ser.loc[[3, 2, 3, "x"]] result = ser.reindex([3, 2, 3, "x"]) @@ -217,7 +217,7 @@ def test_series_partial_set(self): tm.assert_series_equal(result, expected, check_index_type=True) expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, "x", 1]) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): ser.loc[[2, 2, "x", 1]] result = ser.reindex([2, 2, "x", 1]) @@ -232,7 +232,7 @@ def test_series_partial_set(self): ser.loc[[3, 3, 3]] expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3]) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): ser.loc[[2, 2, 3]] result = ser.reindex([2, 2, 3]) @@ -240,7 +240,7 @@ def test_series_partial_set(self): s = Series([0.1, 0.2, 0.3], index=[1, 2, 3]) expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4]) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): s.loc[[3, 4, 4]] result = s.reindex([3, 4, 4]) @@ -248,7 +248,7 @@ def test_series_partial_set(self): s = Series([0.1, 0.2, 0.3, 0.4], index=[1, 2, 3, 4]) expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3]) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): s.loc[[5, 3, 3]] result = s.reindex([5, 3, 3]) @@ -256,7 +256,7 @@ def test_series_partial_set(self): s = Series([0.1, 0.2, 0.3, 0.4], index=[1, 2, 3, 4]) expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4]) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): s.loc[[5, 4, 4]] result = s.reindex([5, 4, 4]) @@ -264,7 +264,7 @@ def test_series_partial_set(self): s = Series([0.1, 0.2, 0.3, 0.4], index=[4, 5, 6, 7]) expected = Series([0.4, np.nan, 
np.nan], index=[7, 2, 2]) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): s.loc[[7, 2, 2]] result = s.reindex([7, 2, 2]) @@ -272,7 +272,7 @@ def test_series_partial_set(self): s = Series([0.1, 0.2, 0.3, 0.4], index=[1, 2, 3, 4]) expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5]) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): s.loc[[4, 5, 5]] result = s.reindex([4, 5, 5]) @@ -290,10 +290,10 @@ def test_series_partial_set_with_name(self): ser = Series([0.1, 0.2], index=idx, name="s") # loc - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match=r"\[3\] not in index"): ser.loc[[3, 2, 3]] - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match=r"not in index"): ser.loc[[3, 2, 3, "x"]] exp_idx = Index([2, 2, 1], dtype="int64", name="idx") @@ -301,7 +301,7 @@ def test_series_partial_set_with_name(self): result = ser.loc[[2, 2, 1]] tm.assert_series_equal(result, expected, check_index_type=True) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match=r"\['x'\] not in index"): ser.loc[[2, 2, "x", 1]] # raises as nothing is in the index @@ -312,27 +312,27 @@ def test_series_partial_set_with_name(self): with pytest.raises(KeyError, match=msg): ser.loc[[3, 3, 3]] - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): ser.loc[[2, 2, 3]] idx = Index([1, 2, 3], dtype="int64", name="idx") - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): Series([0.1, 0.2, 0.3], index=idx, name="s").loc[[3, 4, 4]] idx = Index([1, 2, 3, 4], dtype="int64", name="idx") - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): 
Series([0.1, 0.2, 0.3, 0.4], index=idx, name="s").loc[[5, 3, 3]] idx = Index([1, 2, 3, 4], dtype="int64", name="idx") - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): Series([0.1, 0.2, 0.3, 0.4], index=idx, name="s").loc[[5, 4, 4]] idx = Index([4, 5, 6, 7], dtype="int64", name="idx") - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): Series([0.1, 0.2, 0.3, 0.4], index=idx, name="s").loc[[7, 2, 2]] idx = Index([1, 2, 3, 4], dtype="int64", name="idx") - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): Series([0.1, 0.2, 0.3, 0.4], index=idx, name="s").loc[[4, 5, 5]] # iloc @@ -591,7 +591,7 @@ def test_loc_with_list_of_strings_representing_datetimes_missing_value( # GH 11278 s = Series(range(20), index=idx) df = DataFrame(range(20), index=idx) - msg = r"with any missing labels" + msg = r"not in index" with pytest.raises(KeyError, match=msg): s.loc[labels] diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py index 9a166fc8057ed..0e43e351bc082 100644 --- a/pandas/tests/series/indexing/test_getitem.py +++ b/pandas/tests/series/indexing/test_getitem.py @@ -604,10 +604,10 @@ def test_getitem_with_integer_labels(): ser = Series(np.random.randn(10), index=list(range(0, 20, 2))) inds = [0, 2, 5, 7, 8] arr_inds = np.array([0, 2, 5, 7, 8]) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): ser[inds] - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match="not in index"): ser[arr_inds] diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index 30c37113f6b8f..6c3587c7eeada 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ 
b/pandas/tests/series/indexing/test_indexing.py @@ -1,6 +1,6 @@ """ test get/set & misc """ - from datetime import timedelta +import re import numpy as np import pytest @@ -149,7 +149,7 @@ def test_getitem_dups_with_missing(indexer_sl): # breaks reindex, so need to use .loc internally # GH 4246 ser = Series([1, 2, 3, 4], ["foo", "bar", "foo", "bah"]) - with pytest.raises(KeyError, match="with any missing labels"): + with pytest.raises(KeyError, match=re.escape("['bam'] not in index")): indexer_sl(ser)[["foo", "bar", "bah", "bam"]]
- [x] xref #41170 The removed tests don't make sense anymore, since all is printed now but without a fixed order
https://api.github.com/repos/pandas-dev/pandas/pulls/41371
2021-05-07T16:32:36Z
2021-05-07T22:07:35Z
2021-05-07T22:07:35Z
2021-05-12T01:57:02Z
Pin fastparquet to leq 0.5.0
diff --git a/ci/deps/actions-37-db.yaml b/ci/deps/actions-37-db.yaml index 8755e1a02c3cf..edca7b51a3420 100644 --- a/ci/deps/actions-37-db.yaml +++ b/ci/deps/actions-37-db.yaml @@ -15,7 +15,7 @@ dependencies: - beautifulsoup4 - botocore>=1.11 - dask - - fastparquet>=0.4.0 + - fastparquet>=0.4.0, <=0.5.0 - fsspec>=0.7.4 - gcsfs>=0.6.0 - geopandas diff --git a/ci/deps/azure-windows-38.yaml b/ci/deps/azure-windows-38.yaml index 661d8813d32d2..fdea34d573340 100644 --- a/ci/deps/azure-windows-38.yaml +++ b/ci/deps/azure-windows-38.yaml @@ -15,7 +15,7 @@ dependencies: # pandas dependencies - blosc - bottleneck - - fastparquet>=0.4.0 + - fastparquet>=0.4.0, <=0.5.0 - flask - fsspec>=0.8.0 - matplotlib=3.1.3 diff --git a/environment.yml b/environment.yml index 2e0228a15272e..30fa7c0dea696 100644 --- a/environment.yml +++ b/environment.yml @@ -99,7 +99,7 @@ dependencies: - xlwt - odfpy - - fastparquet>=0.3.2 # pandas.read_parquet, DataFrame.to_parquet + - fastparquet>=0.3.2, <=0.5.0 # pandas.read_parquet, DataFrame.to_parquet - pyarrow>=0.15.0 # pandas.read_parquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather - python-snappy # required by pyarrow diff --git a/requirements-dev.txt b/requirements-dev.txt index ea7ca43742934..3e421c7715566 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -64,7 +64,7 @@ xlrd xlsxwriter xlwt odfpy -fastparquet>=0.3.2 +fastparquet>=0.3.2, <=0.5.0 pyarrow>=0.15.0 python-snappy pyqt5>=5.9.2
- [x] xref #41366 Pinning for now to get ci passing Labeled the issue with 1.3 for now so that we don't forget it
https://api.github.com/repos/pandas-dev/pandas/pulls/41370
2021-05-07T14:59:58Z
2021-05-07T16:41:07Z
2021-05-07T16:41:07Z
2021-06-01T14:58:14Z
r
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 85d9acff353be..4ef8fe116596f 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -695,7 +695,7 @@ Conversion - Bug in :class:`Index` construction silently ignoring a passed ``dtype`` when the data cannot be cast to that dtype (:issue:`21311`) - Bug in :meth:`StringArray.astype` falling back to numpy and raising when converting to ``dtype='categorical'`` (:issue:`40450`) - Bug in :class:`DataFrame` construction with a dictionary containing an arraylike with ``ExtensionDtype`` and ``copy=True`` failing to make a copy (:issue:`38939`) -- +- Bug in :meth:`qcut` raising error when taking ``Float64DType`` as input (:issue:`40730`) Strings ^^^^^^^ diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 41e1ff41d9ba2..7b9c3883d74e3 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -24,8 +24,8 @@ is_datetime_or_timedelta_dtype, is_extension_array_dtype, is_integer, - is_integer_dtype, is_list_like, + is_numeric_dtype, is_scalar, is_timedelta64_dtype, ) @@ -488,7 +488,7 @@ def _coerce_to_type(x): # Will properly support in the future. 
# https://github.com/pandas-dev/pandas/pull/31290 # https://github.com/pandas-dev/pandas/issues/31389 - elif is_extension_array_dtype(x.dtype) and is_integer_dtype(x.dtype): + elif is_extension_array_dtype(x.dtype) and is_numeric_dtype(x.dtype): x = x.to_numpy(dtype=np.float64, na_value=np.nan) if dtype is not None: diff --git a/pandas/tests/reshape/test_qcut.py b/pandas/tests/reshape/test_qcut.py index 7996c15ae8e64..c12d28f6f1380 100644 --- a/pandas/tests/reshape/test_qcut.py +++ b/pandas/tests/reshape/test_qcut.py @@ -293,8 +293,8 @@ def test_qcut_bool_coercion_to_int(bins, box, compare): @pytest.mark.parametrize("q", [2, 5, 10]) -def test_qcut_nullable_integer(q, any_nullable_int_dtype): - arr = pd.array(np.arange(100), dtype=any_nullable_int_dtype) +def test_qcut_nullable_integer(q, any_nullable_numeric_dtype): + arr = pd.array(np.arange(100), dtype=any_nullable_numeric_dtype) arr[::2] = pd.NA result = qcut(arr, q)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41365
2021-05-07T08:20:43Z
2021-05-07T08:21:47Z
null
2021-05-07T13:04:00Z
To csv sparse
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 92f9d803d1ebe..9b7d455d14c34 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2037,6 +2037,7 @@ def to_native_types( mask = isna(values) new_values = np.asarray(values.astype(object)) + new_values[mask] = na_rep return new_values
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41364
2021-05-07T04:40:48Z
2021-05-07T04:41:13Z
null
2021-05-07T04:41:13Z
CLN: groupby
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 55e8578b2cef4..9287163053cac 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -688,9 +688,9 @@ def describe(self, **kwargs): def value_counts( self, - normalize=False, - sort=True, - ascending=False, + normalize: bool = False, + sort: bool = True, + ascending: bool = False, bins=None, dropna: bool = True, ): @@ -715,7 +715,7 @@ def apply_series_value_counts(): # scalar bins cannot be done at top level # in a backward compatible way return apply_series_value_counts() - elif is_categorical_dtype(val): + elif is_categorical_dtype(val.dtype): # GH38672 return apply_series_value_counts() @@ -807,44 +807,36 @@ def apply_series_value_counts(): sorter = np.lexsort((out if ascending else -out, cat)) out, codes[-1] = out[sorter], codes[-1][sorter] - if bins is None: - mi = MultiIndex( - levels=levels, codes=codes, names=names, verify_integrity=False - ) - - if is_integer_dtype(out): - out = ensure_int64(out) - return self.obj._constructor(out, index=mi, name=self._selection_name) - - # for compat. with libgroupby.value_counts need to ensure every - # bin is present at every index level, null filled with zeros - diff = np.zeros(len(out), dtype="bool") - for level_codes in codes[:-1]: - diff |= np.r_[True, level_codes[1:] != level_codes[:-1]] + if bins is not None: + # for compat. 
with libgroupby.value_counts need to ensure every + # bin is present at every index level, null filled with zeros + diff = np.zeros(len(out), dtype="bool") + for level_codes in codes[:-1]: + diff |= np.r_[True, level_codes[1:] != level_codes[:-1]] - ncat, nbin = diff.sum(), len(levels[-1]) + ncat, nbin = diff.sum(), len(levels[-1]) - left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)] + left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)] - right = [diff.cumsum() - 1, codes[-1]] + right = [diff.cumsum() - 1, codes[-1]] - _, idx = get_join_indexers(left, right, sort=False, how="left") - out = np.where(idx != -1, out[idx], 0) + _, idx = get_join_indexers(left, right, sort=False, how="left") + out = np.where(idx != -1, out[idx], 0) - if sort: - sorter = np.lexsort((out if ascending else -out, left[0])) - out, left[-1] = out[sorter], left[-1][sorter] + if sort: + sorter = np.lexsort((out if ascending else -out, left[0])) + out, left[-1] = out[sorter], left[-1][sorter] - # build the multi-index w/ full levels - def build_codes(lev_codes: np.ndarray) -> np.ndarray: - return np.repeat(lev_codes[diff], nbin) + # build the multi-index w/ full levels + def build_codes(lev_codes: np.ndarray) -> np.ndarray: + return np.repeat(lev_codes[diff], nbin) - codes = [build_codes(lev_codes) for lev_codes in codes[:-1]] - codes.append(left[-1]) + codes = [build_codes(lev_codes) for lev_codes in codes[:-1]] + codes.append(left[-1]) mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False) - if is_integer_dtype(out): + if is_integer_dtype(out.dtype): out = ensure_int64(out) return self.obj._constructor(out, index=mi, name=self._selection_name) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index c20a9b7ad2210..1105c1bd1d782 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1837,7 +1837,7 @@ def first(x: Series): return obj.apply(first, axis=axis) elif 
isinstance(obj, Series): return first(obj) - else: + else: # pragma: no cover raise TypeError(type(obj)) return self._agg_general( @@ -1862,7 +1862,7 @@ def last(x: Series): return obj.apply(last, axis=axis) elif isinstance(obj, Series): return last(obj) - else: + else: # pragma: no cover raise TypeError(type(obj)) return self._agg_general( @@ -3271,7 +3271,7 @@ def get_groupby( from pandas.core.groupby.generic import DataFrameGroupBy klass = DataFrameGroupBy - else: + else: # pragma: no cover raise TypeError(f"invalid type: {obj}") return klass( diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index b88f2b0200768..46b47bc29d8a6 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -276,11 +276,11 @@ def get_out_dtype(self, dtype: np.dtype) -> np.dtype: @overload def _get_result_dtype(self, dtype: np.dtype) -> np.dtype: - ... + ... # pragma: no cover @overload def _get_result_dtype(self, dtype: ExtensionDtype) -> ExtensionDtype: - ... + ... # pragma: no cover def _get_result_dtype(self, dtype: DtypeObj) -> DtypeObj: """
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41363
2021-05-07T04:18:19Z
2021-05-07T19:34:32Z
2021-05-07T19:34:32Z
2021-05-07T19:38:41Z
CLN: trim unreachable groupby paths
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 18506b871bda6..b2041a3e1380a 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -266,7 +266,11 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) func = maybe_mangle_lambdas(func) ret = self._aggregate_multiple_funcs(func) if relabeling: - ret.columns = columns + # error: Incompatible types in assignment (expression has type + # "Optional[List[str]]", variable has type "Index") + ret.columns = columns # type: ignore[assignment] + return ret + else: cyfunc = com.get_cython_func(func) if cyfunc and not args and not kwargs: @@ -282,33 +286,21 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) # see test_groupby.test_basic result = self._aggregate_named(func, *args, **kwargs) - index = Index(sorted(result), name=self.grouper.names[0]) - ret = create_series_with_explicit_dtype( - result, index=index, dtype_if_empty=object - ) - - if not self.as_index: # pragma: no cover - print("Warning, ignoring as_index=True") - - if isinstance(ret, dict): - from pandas import concat - - ret = concat(ret.values(), axis=1, keys=[key.label for key in ret.keys()]) - return ret + index = Index(sorted(result), name=self.grouper.names[0]) + return create_series_with_explicit_dtype( + result, index=index, dtype_if_empty=object + ) agg = aggregate - def _aggregate_multiple_funcs(self, arg): + def _aggregate_multiple_funcs(self, arg) -> DataFrame: if isinstance(arg, dict): # show the deprecation, but only if we # have not shown a higher level one # GH 15931 - if isinstance(self._selected_obj, Series): - raise SpecificationError("nested renamer is not supported") + raise SpecificationError("nested renamer is not supported") - columns = list(arg.keys()) - arg = arg.items() elif any(isinstance(x, (tuple, list)) for x in arg): arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg] @@ -335,8 +327,14 @@ 
def _aggregate_multiple_funcs(self, arg): results[base.OutputKey(label=name, position=idx)] = obj.aggregate(func) if any(isinstance(x, DataFrame) for x in results.values()): - # let higher level handle - return results + from pandas import concat + + res_df = concat( + results.values(), axis=1, keys=[key.label for key in results.keys()] + ) + # error: Incompatible return value type (got "Union[DataFrame, Series]", + # expected "DataFrame") + return res_df # type: ignore[return-value] indexed_output = {key.position: val for key, val in results.items()} output = self.obj._constructor_expanddim(indexed_output, index=None) @@ -1000,6 +998,11 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) result = op.agg() if not is_dict_like(func) and result is not None: return result + elif relabeling and result is not None: + # this should be the only (non-raising) case with relabeling + # used reordered index of columns + result = result.iloc[:, order] + result.columns = columns if result is None: @@ -1039,12 +1042,6 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) [sobj.columns.name] * result.columns.nlevels ).droplevel(-1) - if relabeling: - - # used reordered index of columns - result = result.iloc[:, order] - result.columns = columns - if not self.as_index: self._insert_inaxis_grouper_inplace(result) result.index = np.arange(len(result)) @@ -1389,9 +1386,7 @@ def _transform_item_by_item(self, obj: DataFrame, wrapper) -> DataFrame: if not output: raise TypeError("Transform function invalid for data types") - columns = obj.columns - if len(output) < len(obj.columns): - columns = columns.take(inds) + columns = obj.columns.take(inds) return self.obj._constructor(output, index=obj.index, columns=columns) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index c5ef18c51a533..d222bc1c083ed 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -2174,7 +2174,9 
@@ def backfill(self, limit=None): @final @Substitution(name="groupby") @Substitution(see_also=_common_see_also) - def nth(self, n: int | list[int], dropna: str | None = None) -> DataFrame: + def nth( + self, n: int | list[int], dropna: Literal["any", "all", None] = None + ) -> DataFrame: """ Take the nth row from each group if n is an int, or a subset of rows if n is a list of ints. @@ -2187,9 +2189,9 @@ def nth(self, n: int | list[int], dropna: str | None = None) -> DataFrame: ---------- n : int or list of ints A single nth value for the row or a list of nth values. - dropna : None or str, optional + dropna : {'any', 'all', None}, default None Apply the specified dropna operation before counting which row is - the nth row. Needs to be None, 'any' or 'all'. + the nth row. Returns -------
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41361
2021-05-06T23:10:06Z
2021-05-07T15:53:05Z
2021-05-07T15:53:05Z
2023-02-09T04:18:20Z
REF: test_invalid_dtype in numeric index tests
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 45e1b615b1ade..857b136b67a0c 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -831,3 +831,10 @@ def test_arithmetic_explicit_conversions(self): a = np.zeros(5, dtype="float64") result = a - fidx tm.assert_index_equal(result, expected) + + def test_invalid_dtype(self, invalid_dtype): + # GH 29539 + dtype = invalid_dtype + msg = fr"Incorrect `dtype` passed: expected \w+(?: \w+)?, received {dtype}" + with pytest.raises(ValueError, match=msg): + self._index_cls([1, 2, 3], dtype=dtype) diff --git a/pandas/tests/indexes/numeric/test_numeric.py b/pandas/tests/indexes/numeric/test_numeric.py index bfe06d74570da..e63aeba54fccd 100644 --- a/pandas/tests/indexes/numeric/test_numeric.py +++ b/pandas/tests/indexes/numeric/test_numeric.py @@ -8,7 +8,6 @@ Float64Index, Index, Int64Index, - RangeIndex, Series, UInt64Index, ) @@ -20,6 +19,12 @@ class TestFloat64Index(NumericBase): _index_cls = Float64Index _dtype = np.float64 + @pytest.fixture( + params=["int64", "uint64", "category", "datetime64"], + ) + def invalid_dtype(self, request): + return request.param + @pytest.fixture def simple_index(self) -> Index: values = np.arange(5, dtype=self._dtype) @@ -104,23 +109,6 @@ def test_constructor(self): assert result.dtype == dtype assert pd.isna(result.values).all() - @pytest.mark.parametrize( - "index, dtype", - [ - (Int64Index, "float64"), - (UInt64Index, "categorical"), - (Float64Index, "datetime64"), - (RangeIndex, "float64"), - ], - ) - def test_invalid_dtype(self, index, dtype): - # GH 29539 - with pytest.raises( - ValueError, - match=rf"Incorrect `dtype` passed: expected \w+(?: \w+)?, received {dtype}", - ): - index([1, 2, 3], dtype=dtype) - def test_constructor_invalid(self): index_cls = self._index_cls cls_name = index_cls.__name__ @@ -394,6 +382,12 @@ class TestInt64Index(NumericInt): _index_cls = Int64Index _dtype = np.int64 + @pytest.fixture( + 
params=["uint64", "float64", "category", "datetime64"], + ) + def invalid_dtype(self, request): + return request.param + @pytest.fixture def simple_index(self) -> Index: return self._index_cls(range(0, 20, 2), dtype=self._dtype) @@ -492,6 +486,12 @@ class TestUInt64Index(NumericInt): _index_cls = UInt64Index _dtype = np.uint64 + @pytest.fixture( + params=["int64", "float64", "category", "datetime64"], + ) + def invalid_dtype(self, request): + return request.param + @pytest.fixture def simple_index(self) -> Index: # compat with shared Int64/Float64 tests diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py index 3a4aa29ea620e..12fce56ffcb21 100644 --- a/pandas/tests/indexes/ranges/test_range.py +++ b/pandas/tests/indexes/ranges/test_range.py @@ -23,6 +23,12 @@ class TestRangeIndex(NumericBase): _index_cls = RangeIndex + @pytest.fixture( + params=["uint64", "float64", "category", "datetime64"], + ) + def invalid_dtype(self, request): + return request.param + @pytest.fixture def simple_index(self) -> Index: return self._index_cls(start=0, stop=20, step=2)
The invalid dtype tests for all the numeric index types are currently done in `tests.numeric.test_numeric.py::TestFloat64Index::test_invalid_dtype`. This PR moves them to the appropriate test class (`TestInt64Index` etc.).
https://api.github.com/repos/pandas-dev/pandas/pulls/41359
2021-05-06T20:55:17Z
2021-05-07T12:43:27Z
2021-05-07T12:43:27Z
2021-05-07T13:09:43Z
Bug in loc not raising KeyError with MultiIndex containing no longer used levels
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index cf3dd1b0e3226..495ed86f2cc4e 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -787,6 +787,7 @@ Indexing - Bug in setting ``numpy.timedelta64`` values into an object-dtype :class:`Series` using a boolean indexer (:issue:`39488`) - Bug in setting numeric values into a into a boolean-dtypes :class:`Series` using ``at`` or ``iat`` failing to cast to object-dtype (:issue:`39582`) - Bug in :meth:`DataFrame.__setitem__` and :meth:`DataFrame.iloc.__setitem__` raising ``ValueError`` when trying to index with a row-slice and setting a list as values (:issue:`40440`) +- Bug in :meth:`DataFrame.loc` not raising ``KeyError`` when key was not found in :class:`MultiIndex` when levels contain more values than used (:issue:`41170`) - Bug in :meth:`DataFrame.loc.__setitem__` when setting-with-expansion incorrectly raising when the index in the expanding axis contains duplicates (:issue:`40096`) - Bug in :meth:`DataFrame.loc` incorrectly matching non-boolean index elements (:issue:`20432`) - Bug in :meth:`Series.__delitem__` with ``ExtensionDtype`` incorrectly casting to ``ndarray`` (:issue:`40386`) @@ -806,6 +807,7 @@ MultiIndex - Bug in :meth:`MultiIndex.intersection` duplicating ``NaN`` in result (:issue:`38623`) - Bug in :meth:`MultiIndex.equals` incorrectly returning ``True`` when :class:`MultiIndex` containing ``NaN`` even when they are differently ordered (:issue:`38439`) - Bug in :meth:`MultiIndex.intersection` always returning empty when intersecting with :class:`CategoricalIndex` (:issue:`38653`) +- Bug in :meth:`MultiIndex.reindex` raising ``ValueError`` with empty MultiIndex and indexing only a specific level (:issue:`41170`) I/O ^^^ diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 84f1245299d53..3785ade4688d2 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4229,7 +4229,8 @@ def 
_get_leaf_sorter(labels: list[np.ndarray]) -> np.ndarray: else: # tie out the order with other if level == 0: # outer most level, take the fast route - ngroups = 1 + new_lev_codes.max() + max_new_lev = 0 if len(new_lev_codes) == 0 else new_lev_codes.max() + ngroups = 1 + max_new_lev left_indexer, counts = libalgos.groupsort_indexer( new_lev_codes, ngroups ) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index a68238af003e4..c1295a98bf357 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -72,6 +72,7 @@ from pandas.core.arrays import Categorical from pandas.core.arrays.categorical import factorize_from_iterables import pandas.core.common as com +from pandas.core.indexers import is_empty_indexer import pandas.core.indexes.base as ibase from pandas.core.indexes.base import ( Index, @@ -2634,6 +2635,10 @@ def _convert_listlike_indexer(self, keyarr): mask = check == -1 if mask.any(): raise KeyError(f"{keyarr[mask]} not in index") + elif is_empty_indexer(indexer, keyarr): + # We get here when levels still contain values which are not + # actually in Index anymore + raise KeyError(f"{keyarr} not in index") return indexer, keyarr diff --git a/pandas/tests/indexes/multi/test_reindex.py b/pandas/tests/indexes/multi/test_reindex.py index 5ed34cd766bce..3b0fcd72f3123 100644 --- a/pandas/tests/indexes/multi/test_reindex.py +++ b/pandas/tests/indexes/multi/test_reindex.py @@ -104,3 +104,14 @@ def test_reindex_non_unique(): msg = "cannot handle a non-unique multi-index!" 
with pytest.raises(ValueError, match=msg): a.reindex(new_idx) + + +@pytest.mark.parametrize("values", [[["a"], ["x"]], [[], []]]) +def test_reindex_empty_with_level(values): + # GH41170 + idx = MultiIndex.from_arrays(values) + result, result_indexer = idx.reindex(np.array(["b"]), level=0) + expected = MultiIndex(levels=[["b"], values[1]], codes=[[], []]) + expected_indexer = np.array([], dtype=result_indexer.dtype) + tm.assert_index_equal(result, expected) + tm.assert_numpy_array_equal(result_indexer, expected_indexer) diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 11391efde4956..a1c646b4dc0b5 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -1624,6 +1624,13 @@ def test_loc_getitem_preserves_index_level_category_dtype(self): result = df.loc[["a"]].index.levels[0] tm.assert_index_equal(result, expected) + @pytest.mark.parametrize("lt_value", [30, 10]) + def test_loc_multiindex_levels_contain_values_not_in_index_anymore(self, lt_value): + # GH#41170 + df = DataFrame({"a": [12, 23, 34, 45]}, index=[list("aabb"), [0, 1, 2, 3]]) + with pytest.raises(KeyError, match=r"\['b'\] not in index"): + df.loc[df["a"] < lt_value, :].loc[["b"], :] + class TestLocSetitemWithExpansion: @pytest.mark.slow diff --git a/pandas/tests/series/methods/test_reindex.py b/pandas/tests/series/methods/test_reindex.py index 8e54cbeb313c4..36d3971d10a3d 100644 --- a/pandas/tests/series/methods/test_reindex.py +++ b/pandas/tests/series/methods/test_reindex.py @@ -4,6 +4,7 @@ from pandas import ( Categorical, Index, + MultiIndex, NaT, Period, PeriodIndex, @@ -345,3 +346,16 @@ def test_reindex_periodindex_with_object(p_values, o_values, values, expected_va result = ser.reindex(object_index) expected = Series(expected_values, index=object_index) tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("values", [[["a"], ["x"]], [[], []]]) +def test_reindex_empty_with_level(values): + # GH41170 + ser = 
Series( + range(len(values[0])), index=MultiIndex.from_arrays(values), dtype="object" + ) + result = ser.reindex(np.array(["b"]), level=0) + expected = Series( + index=MultiIndex(levels=[["b"], values[1]], codes=[[], []]), dtype="object" + ) + tm.assert_series_equal(result, expected)
- [x] closes #41170 - [x] closes #40235 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry stumbled across a bug in MultiIndex.reindex, which caused the ValueError from the op
https://api.github.com/repos/pandas-dev/pandas/pulls/41358
2021-05-06T20:44:43Z
2021-05-12T01:20:35Z
2021-05-12T01:20:34Z
2021-05-12T09:19:14Z
ENH: Implemented optional initializer for pd.concat (#40820)
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index cf3dd1b0e3226..640940b67e6b9 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -226,7 +226,7 @@ Other enhancements - :meth:`.GroupBy.any` and :meth:`.GroupBy.all` return a ``BooleanDtype`` for columns with nullable data types (:issue:`33449`) - Constructing a :class:`DataFrame` or :class:`Series` with the ``data`` argument being a Python iterable that is *not* a NumPy ``ndarray`` consisting of NumPy scalars will now result in a dtype with a precision the maximum of the NumPy scalars; this was already the case when ``data`` is a NumPy ``ndarray`` (:issue:`40908`) - Add keyword ``sort`` to :func:`pivot_table` to allow non-sorting of the result (:issue:`39143`) -- +- ``pandas.concat()`` now accepts a new parameter ``initializer`` which will allow for the concatenation of empty sequences and returns the DataFrame/Series object passed into the initializer parameter. (:issue:`40820`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index b3b453ea6355a..c2c9d3ec61198 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -64,6 +64,7 @@ def concat( verify_integrity: bool = False, sort: bool = False, copy: bool = True, + initializer: NDFrame | None = None, ) -> DataFrame: ... @@ -80,6 +81,7 @@ def concat( verify_integrity: bool = False, sort: bool = False, copy: bool = True, + initializer: NDFrame | None = None, ) -> FrameOrSeriesUnion: ... @@ -95,6 +97,7 @@ def concat( verify_integrity: bool = False, sort: bool = False, copy: bool = True, + initializer: NDFrame | None = None, ) -> FrameOrSeriesUnion: """ Concatenate pandas objects along a particular axis with optional set logic @@ -144,6 +147,12 @@ def concat( copy : bool, default True If False, do not copy data unnecessarily. 
+ initializer : a Series or DataFrame object, default None + If the optional initializer is present, it is placed before the items of + ``objs`` before the concatenation, and serves as a default when the + sequence is empty. + + .. versionadded:: 1.3.0 Returns ------- @@ -151,7 +160,9 @@ def concat( When concatenating all ``Series`` along the index (axis=0), a ``Series`` is returned. When ``objs`` contains at least one ``DataFrame``, a ``DataFrame`` is returned. When concatenating along - the columns (axis=1), a ``DataFrame`` is returned. + the columns (axis=1), a ``DataFrame`` is returned. When concatenating + on an empty sequence, the ``DataFrame`` or ``Series`` object specified + in the ``initializer`` parameter will be returned. See Also -------- @@ -286,6 +297,23 @@ def concat( Traceback (most recent call last): ... ValueError: Indexes have overlapping values: ['a'] + + >>> pd.concat([]) + Traceback (most recent call last): + ... + ValueError: No objects to concatenate and no initializer not set + + >>> pd.concat([], initializer=pd.Series()) + Series([], dtype: object) + + >>> pd.concat([], initializer=pd.DataFrame()) + Empty DataFrame + Columns: [] + Index: [] + + Concatenating on an empty sequence with an initializer would yield + the object specified by the initializer. 
+ >>> """ op = _Concatenator( objs, @@ -298,6 +326,7 @@ def concat( verify_integrity=verify_integrity, copy=copy, sort=sort, + initializer=initializer, ) return op.get_result() @@ -320,6 +349,7 @@ def __init__( verify_integrity: bool = False, copy: bool = True, sort=False, + initializer: NDFrame | None = None, ): if isinstance(objs, (ABCSeries, ABCDataFrame, str)): raise TypeError( @@ -344,7 +374,11 @@ def __init__( objs = list(objs) if len(objs) == 0: - raise ValueError("No objects to concatenate") + if initializer is None: + raise ValueError("No objects to concatenate and no initializer set") + + if initializer is not None: + objs = [initializer] + objs if keys is None: objs = list(com.not_none(*objs)) diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py index 96b88dc61cfed..6f486233a8397 100644 --- a/pandas/tests/reshape/concat/test_concat.py +++ b/pandas/tests/reshape/concat/test_concat.py @@ -348,10 +348,44 @@ def test_concat_single_with_key(self): expected = concat([df, df], keys=["foo", "bar"]) tm.assert_frame_equal(result, expected[:10]) - def test_concat_no_items_raises(self): - with pytest.raises(ValueError, match="No objects to concatenate"): + def test_concat_no_items_and_initializer_raises(self): + with pytest.raises( + ValueError, match="No objects to concatenate and no initializer set" + ): concat([]) + def test_concat_no_items_and_series_initializer(self): + result = concat([], initializer=Series()) + expected = Series() + tm.assert_series_equal(result, expected) + + expected = Series(np.random.randn(10)) + series = expected.copy() + result = concat([], initializer=series) + tm.assert_series_equal(result, expected) + + def test_concat_series_and_series_initializer(self): + series = Series(np.random.randn(10)) + + result = concat([series[5:]], initializer=series[:5]) + tm.assert_series_equal(result, series) + + def test_concat_no_items_and_df_initializer(self): + result = concat([], 
initializer=DataFrame()) + expected = DataFrame() + tm.assert_frame_equal(result, expected) + + expected = DataFrame(np.random.randn(1, 3)) + df = expected.copy() + result = concat([], initializer=df) + tm.assert_frame_equal(result, expected) + + def test_concat_df_and_df_initializer(self): + df = DataFrame(np.random.randn(10, 4)) + + result = concat([df[5:]], initializer=df[:5]) + tm.assert_frame_equal(result, df) + def test_concat_exclude_none(self): df = DataFrame(np.random.randn(10, 4)) @@ -361,6 +395,13 @@ def test_concat_exclude_none(self): with pytest.raises(ValueError, match="All objects passed were None"): concat([None, None]) + def test_concat_exclude_none_with_initializer(self): + df = DataFrame(np.random.randn(10, 4)) + + result = concat([None, None], initializer=df) + expected = df.copy() + tm.assert_frame_equal(result, expected) + def test_concat_keys_with_none(self): # #1649 df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
- [x] closes #40820 - [x] tests added / passed (passed on azure checks) - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry A proposed solution for issue #40820, to accept an optional argument ``initializer`` which allows for concatenating empty sequences and return the object passed to initializer -- which can be useful for cases where users are running pd.concat on sequence variables which might potentially be empty -- and would like the result of the concatenation to be initialized to an empty Series/DataFrame The new argument is made optional, and the ValueError is still thrown for cases where the ``initializer`` is not specified -- this should maintain expected behaviour / backward compatibility
https://api.github.com/repos/pandas-dev/pandas/pulls/41355
2021-05-06T18:31:29Z
2021-05-07T17:16:53Z
null
2021-05-07T17:16:54Z
REF: preserve Index dtype in BlockManager._combine
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 71e6d14e6a716..dd02771f735a6 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -274,9 +274,6 @@ def apply( else: new_axes = self._axes - if len(result_arrays) == 0: - return self.make_empty(new_axes) - # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]"; # expected "List[Union[ndarray, ExtensionArray]]" return type(self)(result_arrays, new_axes) # type: ignore[arg-type] @@ -487,7 +484,7 @@ def _get_data_subset(self: T, predicate: Callable) -> T: indices = [i for i, arr in enumerate(self.arrays) if predicate(arr)] arrays = [self.arrays[i] for i in indices] # TODO copy? - new_axes = [self._axes[0], self._axes[1][np.array(indices, dtype="int64")]] + new_axes = [self._axes[0], self._axes[1][np.array(indices, dtype="intp")]] return type(self)(arrays, new_axes, verify_integrity=False) def get_bool_data(self: T, copy: bool = False) -> T: @@ -696,7 +693,6 @@ def _equal_values(self, other) -> bool: return True # TODO - # equals # to_dict diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 73f463997c085..cdb7d8a6ccd45 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -345,9 +345,6 @@ def apply( if ignore_failures: return self._combine(result_blocks) - if len(result_blocks) == 0: - return self.make_empty(self.axes) - return type(self).from_blocks(result_blocks, self.axes) def where(self: T, other, cond, align: bool, errors: str) -> T: @@ -532,6 +529,13 @@ def _combine( ) -> T: """ return a new manager with the blocks """ if len(blocks) == 0: + if self.ndim == 2: + # retain our own Index dtype + if index is not None: + axes = [self.items[:0], index] + else: + axes = [self.items[:0]] + self.axes[1:] + return self.make_empty(axes) return self.make_empty() # FIXME: optimization potential @@ -1233,7 +1237,7 @@ def grouped_reduce(self: 
T, func: Callable, ignore_failures: bool = False) -> T: index = Index(range(result_blocks[0].values.shape[-1])) if ignore_failures: - return self._combine(result_blocks, index=index) + return self._combine(result_blocks, copy=False, index=index) return type(self).from_blocks(result_blocks, [self.axes[0], index]) @@ -1270,7 +1274,7 @@ def reduce( new_mgr = self._combine(res_blocks, copy=False, index=index) else: indexer = [] - new_mgr = type(self).from_blocks([], [Index([]), index]) + new_mgr = type(self).from_blocks([], [self.items[:0], index]) else: indexer = np.arange(self.shape[0]) new_mgr = type(self).from_blocks(res_blocks, [self.items, index]) diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index d07f843f4acfc..771d31aa6865b 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -85,7 +85,7 @@ def test_rename(self): # multiple axes at once - def test_get_numeric_data(self, using_array_manager): + def test_get_numeric_data(self): n = 4 kwargs = { @@ -100,9 +100,9 @@ def test_get_numeric_data(self, using_array_manager): # non-inclusion result = o._get_bool_data() expected = self._construct(n, value="empty", **kwargs) - if using_array_manager and isinstance(o, DataFrame): - # INFO(ArrayManager) preserve the dtype of the columns Index - expected.columns = expected.columns.astype("int64") + if isinstance(o, DataFrame): + # preserve columns dtype + expected.columns = o.columns[:0] self._compare(result, expected) # get the bool data
needed for fixing the DataFrameGroupBy part of #41291
https://api.github.com/repos/pandas-dev/pandas/pulls/41354
2021-05-06T17:48:46Z
2021-05-06T23:09:39Z
2021-05-06T23:09:39Z
2021-05-06T23:19:47Z
whatsnew about new engine param for to_sql (follow up to #40556)
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index a81dda4e7dfdd..2b560f7f2499f 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -608,7 +608,7 @@ Other API changes ^^^^^^^^^^^^^^^^^ - Partially initialized :class:`CategoricalDtype` (i.e. those with ``categories=None`` objects will no longer compare as equal to fully initialized dtype objects. - Accessing ``_constructor_expanddim`` on a :class:`DataFrame` and ``_constructor_sliced`` on a :class:`Series` now raise an ``AttributeError``. Previously a ``NotImplementedError`` was raised (:issue:`38782`) -- +- Added new ``engine`` and ``**engine_kwargs`` parameters to :meth:`DataFrame.to_sql` to support other future "SQL engines". Currently we still only use ``SQLAlchemy`` under the hood, but more engines are planned to be supported such as ``turbodbc`` (:issue:`36893`) Build =====
- [ ] xref #36893 - [ ] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry Follow up to #40556, per this comment https://github.com/pandas-dev/pandas/pull/40556#issuecomment-831413175
https://api.github.com/repos/pandas-dev/pandas/pulls/41353
2021-05-06T15:41:02Z
2021-05-06T23:20:15Z
2021-05-06T23:20:15Z
2021-05-06T23:38:44Z
TST/REF: split out replace regex into class
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index e6ed60dc2bb08..d2974a5d08a60 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -57,217 +57,6 @@ def test_replace_inplace(self, datetime_frame, float_string_frame): assert return_value is None tm.assert_frame_equal(tsframe, datetime_frame.fillna(0)) - def test_regex_replace_scalar(self, mix_ab): - obj = {"a": list("ab.."), "b": list("efgh")} - dfobj = DataFrame(obj) - dfmix = DataFrame(mix_ab) - - # simplest cases - # regex -> value - # obj frame - res = dfobj.replace(r"\s*\.\s*", np.nan, regex=True) - tm.assert_frame_equal(dfobj, res.fillna(".")) - - # mixed - res = dfmix.replace(r"\s*\.\s*", np.nan, regex=True) - tm.assert_frame_equal(dfmix, res.fillna(".")) - - # regex -> regex - # obj frame - res = dfobj.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True) - objc = obj.copy() - objc["a"] = ["a", "b", "...", "..."] - expec = DataFrame(objc) - tm.assert_frame_equal(res, expec) - - # with mixed - res = dfmix.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True) - mixc = mix_ab.copy() - mixc["b"] = ["a", "b", "...", "..."] - expec = DataFrame(mixc) - tm.assert_frame_equal(res, expec) - - # everything with compiled regexs as well - res = dfobj.replace(re.compile(r"\s*\.\s*"), np.nan, regex=True) - tm.assert_frame_equal(dfobj, res.fillna(".")) - - # mixed - res = dfmix.replace(re.compile(r"\s*\.\s*"), np.nan, regex=True) - tm.assert_frame_equal(dfmix, res.fillna(".")) - - # regex -> regex - # obj frame - res = dfobj.replace(re.compile(r"\s*(\.)\s*"), r"\1\1\1") - objc = obj.copy() - objc["a"] = ["a", "b", "...", "..."] - expec = DataFrame(objc) - tm.assert_frame_equal(res, expec) - - # with mixed - res = dfmix.replace(re.compile(r"\s*(\.)\s*"), r"\1\1\1") - mixc = mix_ab.copy() - mixc["b"] = ["a", "b", "...", "..."] - expec = DataFrame(mixc) - tm.assert_frame_equal(res, expec) - - res = 
dfmix.replace(regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1") - mixc = mix_ab.copy() - mixc["b"] = ["a", "b", "...", "..."] - expec = DataFrame(mixc) - tm.assert_frame_equal(res, expec) - - res = dfmix.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1") - mixc = mix_ab.copy() - mixc["b"] = ["a", "b", "...", "..."] - expec = DataFrame(mixc) - tm.assert_frame_equal(res, expec) - - def test_regex_replace_scalar_inplace(self, mix_ab): - obj = {"a": list("ab.."), "b": list("efgh")} - dfobj = DataFrame(obj) - dfmix = DataFrame(mix_ab) - - # simplest cases - # regex -> value - # obj frame - res = dfobj.copy() - return_value = res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True) - assert return_value is None - tm.assert_frame_equal(dfobj, res.fillna(".")) - - # mixed - res = dfmix.copy() - return_value = res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True) - assert return_value is None - tm.assert_frame_equal(dfmix, res.fillna(".")) - - # regex -> regex - # obj frame - res = dfobj.copy() - return_value = res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True) - assert return_value is None - objc = obj.copy() - objc["a"] = ["a", "b", "...", "..."] - expec = DataFrame(objc) - tm.assert_frame_equal(res, expec) - - # with mixed - res = dfmix.copy() - return_value = res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True) - assert return_value is None - mixc = mix_ab.copy() - mixc["b"] = ["a", "b", "...", "..."] - expec = DataFrame(mixc) - tm.assert_frame_equal(res, expec) - - # everything with compiled regexs as well - res = dfobj.copy() - return_value = res.replace( - re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True - ) - assert return_value is None - tm.assert_frame_equal(dfobj, res.fillna(".")) - - # mixed - res = dfmix.copy() - return_value = res.replace( - re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True - ) - assert return_value is None - tm.assert_frame_equal(dfmix, res.fillna(".")) - - # regex -> regex - # obj frame - res = 
dfobj.copy() - return_value = res.replace( - re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True - ) - assert return_value is None - objc = obj.copy() - objc["a"] = ["a", "b", "...", "..."] - expec = DataFrame(objc) - tm.assert_frame_equal(res, expec) - - # with mixed - res = dfmix.copy() - return_value = res.replace( - re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True - ) - assert return_value is None - mixc = mix_ab.copy() - mixc["b"] = ["a", "b", "...", "..."] - expec = DataFrame(mixc) - tm.assert_frame_equal(res, expec) - - res = dfobj.copy() - return_value = res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True) - assert return_value is None - tm.assert_frame_equal(dfobj, res.fillna(".")) - - # mixed - res = dfmix.copy() - return_value = res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True) - assert return_value is None - tm.assert_frame_equal(dfmix, res.fillna(".")) - - # regex -> regex - # obj frame - res = dfobj.copy() - return_value = res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True) - assert return_value is None - objc = obj.copy() - objc["a"] = ["a", "b", "...", "..."] - expec = DataFrame(objc) - tm.assert_frame_equal(res, expec) - - # with mixed - res = dfmix.copy() - return_value = res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True) - assert return_value is None - mixc = mix_ab.copy() - mixc["b"] = ["a", "b", "...", "..."] - expec = DataFrame(mixc) - tm.assert_frame_equal(res, expec) - - # everything with compiled regexs as well - res = dfobj.copy() - return_value = res.replace( - regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True - ) - assert return_value is None - tm.assert_frame_equal(dfobj, res.fillna(".")) - - # mixed - res = dfmix.copy() - return_value = res.replace( - regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True - ) - assert return_value is None - tm.assert_frame_equal(dfmix, res.fillna(".")) - - # regex -> regex - # obj frame - res = dfobj.copy() - return_value = 
res.replace( - regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True - ) - assert return_value is None - objc = obj.copy() - objc["a"] = ["a", "b", "...", "..."] - expec = DataFrame(objc) - tm.assert_frame_equal(res, expec) - - # with mixed - res = dfmix.copy() - return_value = res.replace( - regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True - ) - assert return_value is None - mixc = mix_ab.copy() - mixc["b"] = ["a", "b", "...", "..."] - expec = DataFrame(mixc) - tm.assert_frame_equal(res, expec) - def test_regex_replace_list_obj(self): obj = {"a": list("ab.."), "b": list("efgh"), "c": list("helo")} dfobj = DataFrame(obj) @@ -1689,3 +1478,216 @@ def test_replace_bytes(self, frame_or_series): expected = obj.copy() obj = obj.replace({None: np.nan}) tm.assert_equal(obj, expected) + + +class TestDataFrameReplaceRegex: + def test_regex_replace_scalar(self, mix_ab): + obj = {"a": list("ab.."), "b": list("efgh")} + dfobj = DataFrame(obj) + dfmix = DataFrame(mix_ab) + + # simplest cases + # regex -> value + # obj frame + res = dfobj.replace(r"\s*\.\s*", np.nan, regex=True) + tm.assert_frame_equal(dfobj, res.fillna(".")) + + # mixed + res = dfmix.replace(r"\s*\.\s*", np.nan, regex=True) + tm.assert_frame_equal(dfmix, res.fillna(".")) + + # regex -> regex + # obj frame + res = dfobj.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True) + objc = obj.copy() + objc["a"] = ["a", "b", "...", "..."] + expec = DataFrame(objc) + tm.assert_frame_equal(res, expec) + + # with mixed + res = dfmix.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True) + mixc = mix_ab.copy() + mixc["b"] = ["a", "b", "...", "..."] + expec = DataFrame(mixc) + tm.assert_frame_equal(res, expec) + + # everything with compiled regexs as well + res = dfobj.replace(re.compile(r"\s*\.\s*"), np.nan, regex=True) + tm.assert_frame_equal(dfobj, res.fillna(".")) + + # mixed + res = dfmix.replace(re.compile(r"\s*\.\s*"), np.nan, regex=True) + tm.assert_frame_equal(dfmix, res.fillna(".")) + + # regex -> regex + # 
obj frame + res = dfobj.replace(re.compile(r"\s*(\.)\s*"), r"\1\1\1") + objc = obj.copy() + objc["a"] = ["a", "b", "...", "..."] + expec = DataFrame(objc) + tm.assert_frame_equal(res, expec) + + # with mixed + res = dfmix.replace(re.compile(r"\s*(\.)\s*"), r"\1\1\1") + mixc = mix_ab.copy() + mixc["b"] = ["a", "b", "...", "..."] + expec = DataFrame(mixc) + tm.assert_frame_equal(res, expec) + + res = dfmix.replace(regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1") + mixc = mix_ab.copy() + mixc["b"] = ["a", "b", "...", "..."] + expec = DataFrame(mixc) + tm.assert_frame_equal(res, expec) + + res = dfmix.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1") + mixc = mix_ab.copy() + mixc["b"] = ["a", "b", "...", "..."] + expec = DataFrame(mixc) + tm.assert_frame_equal(res, expec) + + def test_regex_replace_scalar_inplace(self, mix_ab): + obj = {"a": list("ab.."), "b": list("efgh")} + dfobj = DataFrame(obj) + dfmix = DataFrame(mix_ab) + + # simplest cases + # regex -> value + # obj frame + res = dfobj.copy() + return_value = res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True) + assert return_value is None + tm.assert_frame_equal(dfobj, res.fillna(".")) + + # mixed + res = dfmix.copy() + return_value = res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True) + assert return_value is None + tm.assert_frame_equal(dfmix, res.fillna(".")) + + # regex -> regex + # obj frame + res = dfobj.copy() + return_value = res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True) + assert return_value is None + objc = obj.copy() + objc["a"] = ["a", "b", "...", "..."] + expec = DataFrame(objc) + tm.assert_frame_equal(res, expec) + + # with mixed + res = dfmix.copy() + return_value = res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True) + assert return_value is None + mixc = mix_ab.copy() + mixc["b"] = ["a", "b", "...", "..."] + expec = DataFrame(mixc) + tm.assert_frame_equal(res, expec) + + # everything with compiled regexs as well + res = dfobj.copy() + return_value = 
res.replace( + re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True + ) + assert return_value is None + tm.assert_frame_equal(dfobj, res.fillna(".")) + + # mixed + res = dfmix.copy() + return_value = res.replace( + re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True + ) + assert return_value is None + tm.assert_frame_equal(dfmix, res.fillna(".")) + + # regex -> regex + # obj frame + res = dfobj.copy() + return_value = res.replace( + re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True + ) + assert return_value is None + objc = obj.copy() + objc["a"] = ["a", "b", "...", "..."] + expec = DataFrame(objc) + tm.assert_frame_equal(res, expec) + + # with mixed + res = dfmix.copy() + return_value = res.replace( + re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True + ) + assert return_value is None + mixc = mix_ab.copy() + mixc["b"] = ["a", "b", "...", "..."] + expec = DataFrame(mixc) + tm.assert_frame_equal(res, expec) + + res = dfobj.copy() + return_value = res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True) + assert return_value is None + tm.assert_frame_equal(dfobj, res.fillna(".")) + + # mixed + res = dfmix.copy() + return_value = res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True) + assert return_value is None + tm.assert_frame_equal(dfmix, res.fillna(".")) + + # regex -> regex + # obj frame + res = dfobj.copy() + return_value = res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True) + assert return_value is None + objc = obj.copy() + objc["a"] = ["a", "b", "...", "..."] + expec = DataFrame(objc) + tm.assert_frame_equal(res, expec) + + # with mixed + res = dfmix.copy() + return_value = res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True) + assert return_value is None + mixc = mix_ab.copy() + mixc["b"] = ["a", "b", "...", "..."] + expec = DataFrame(mixc) + tm.assert_frame_equal(res, expec) + + # everything with compiled regexs as well + res = dfobj.copy() + return_value = res.replace( + 
regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True + ) + assert return_value is None + tm.assert_frame_equal(dfobj, res.fillna(".")) + + # mixed + res = dfmix.copy() + return_value = res.replace( + regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True + ) + assert return_value is None + tm.assert_frame_equal(dfmix, res.fillna(".")) + + # regex -> regex + # obj frame + res = dfobj.copy() + return_value = res.replace( + regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True + ) + assert return_value is None + objc = obj.copy() + objc["a"] = ["a", "b", "...", "..."] + expec = DataFrame(objc) + tm.assert_frame_equal(res, expec) + + # with mixed + res = dfmix.copy() + return_value = res.replace( + regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True + ) + assert return_value is None + mixc = mix_ab.copy() + mixc["b"] = ["a", "b", "...", "..."] + expec = DataFrame(mixc) + tm.assert_frame_equal(res, expec)
Can move a bunch more, but the diff gets really garbled when moving more, so easier to do in chunks
https://api.github.com/repos/pandas-dev/pandas/pulls/41352
2021-05-06T15:02:42Z
2021-05-12T01:22:16Z
2021-05-12T01:22:16Z
2021-05-12T01:52:52Z
BUG : Min/max markers on box plot are not visible with 'dark_background' (#40769)
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index a81dda4e7dfdd..84144f234d7a9 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -849,6 +849,7 @@ Plotting - Prevent warnings when matplotlib's ``constrained_layout`` is enabled (:issue:`25261`) - Bug in :func:`DataFrame.plot` was showing the wrong colors in the legend if the function was called repeatedly and some calls used ``yerr`` while others didn't (partial fix of :issue:`39522`) - Bug in :func:`DataFrame.plot` was showing the wrong colors in the legend if the function was called repeatedly and some calls used ``secondary_y`` and others use ``legend=False`` (:issue:`40044`) +- Bug in :meth:`DataFrame.plot.box` in box plot when ``dark_background`` theme was selected, caps or min/max markers for the plot was not visible (:issue:`40769`) Groupby/resample/rolling diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py index 6a81e3ae43b5d..21f30c1311e17 100644 --- a/pandas/plotting/_matplotlib/boxplot.py +++ b/pandas/plotting/_matplotlib/boxplot.py @@ -101,7 +101,7 @@ def _validate_color_args(self): self._boxes_c = colors[0] self._whiskers_c = colors[0] self._medians_c = colors[2] - self._caps_c = "k" # mpl default + self._caps_c = colors[0] def _get_colors(self, num_colors=None, color_kwds="color"): pass diff --git a/pandas/tests/plotting/frame/test_frame_color.py b/pandas/tests/plotting/frame/test_frame_color.py index 6844124d15f9d..a9b691f2a42b9 100644 --- a/pandas/tests/plotting/frame/test_frame_color.py +++ b/pandas/tests/plotting/frame/test_frame_color.py @@ -546,7 +546,13 @@ def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None): df = DataFrame(np.random.randn(5, 5)) bp = df.plot.box(return_type="dict") - _check_colors(bp, default_colors[0], default_colors[0], default_colors[2]) + _check_colors( + bp, + default_colors[0], + default_colors[0], + default_colors[2], + default_colors[0], 
+ ) tm.close() dict_colors = { @@ -569,7 +575,7 @@ def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None): # partial colors dict_colors = {"whiskers": "c", "medians": "m"} bp = df.plot.box(color=dict_colors, return_type="dict") - _check_colors(bp, default_colors[0], "c", "m") + _check_colors(bp, default_colors[0], "c", "m", default_colors[0]) tm.close() from matplotlib import cm @@ -577,12 +583,12 @@ def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None): # Test str -> colormap functionality bp = df.plot.box(colormap="jet", return_type="dict") jet_colors = [cm.jet(n) for n in np.linspace(0, 1, 3)] - _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2]) + _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2], jet_colors[0]) tm.close() # Test colormap functionality bp = df.plot.box(colormap=cm.jet, return_type="dict") - _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2]) + _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2], jet_colors[0]) tm.close() # string color is applied to all artists except fliers diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py index 448679d562a4a..dbceeae44a493 100644 --- a/pandas/tests/plotting/test_boxplot_method.py +++ b/pandas/tests/plotting/test_boxplot_method.py @@ -195,6 +195,39 @@ def test_color_kwd(self, colors_kwd, expected): for k, v in expected.items(): assert result[k][0].get_color() == v + @pytest.mark.parametrize( + "scheme,expected", + [ + ( + "dark_background", + { + "boxes": "#8dd3c7", + "whiskers": "#8dd3c7", + "medians": "#bfbbd9", + "caps": "#8dd3c7", + }, + ), + ( + "default", + { + "boxes": "#1f77b4", + "whiskers": "#1f77b4", + "medians": "#2ca02c", + "caps": "#1f77b4", + }, + ), + ], + ) + def test_colors_in_theme(self, scheme, expected): + # GH: 40769 + df = DataFrame(np.random.rand(10, 2)) + import matplotlib.pyplot as plt + + plt.style.use(scheme) + result = 
df.plot.box(return_type="dict") + for k, v in expected.items(): + assert result[k][0].get_color() == v + @pytest.mark.parametrize( "dict_colors, msg", [({"boxes": "r", "invalid_key": "r"}, "invalid key 'invalid_key'")],
- [x] closes #40769 - [x] tests added / passed - [x] Ensure all linting tests pass, see here for how to run them Color in boxplot.py should be a hex value. Letter based color will be at matplotlib level. Using same color as that of boxes for the caps as well. The color will be fetched based on the selected theme. We are over riding color with "k" , rather need to use the one set at matplotlib level. Before Fix : Caps for the box plot is not visible : ![![image](https://user-images.githubusercontent.com/9417467/117279259-2634b480-ae7f-11eb-933c-350cff0133de.png)](https://user-images.githubusercontent.com/9417467/117279207-1a48f280-ae7f-11eb-9f0f-fe477d80dc77.png) After Fix : Caps for the box plow will be visible in dark background mode : ![image](https://user-images.githubusercontent.com/9417467/117279282-2df45900-ae7f-11eb-8107-9fe3ae2fe15f.png) Whats New : Box Plot's Caps will have same color as boxes unless color is explicitly specified by user arguments. Hence theme changes will not adversly effect caps color.
https://api.github.com/repos/pandas-dev/pandas/pulls/41349
2021-05-06T09:55:00Z
2021-05-10T09:34:29Z
2021-05-10T09:34:28Z
2021-05-10T09:34:38Z
REF: Share Block/ArrayManager set_axis
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 18506b871bda6..dcb962d737903 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1625,14 +1625,14 @@ def _wrap_transformed_output( def _wrap_agged_manager(self, mgr: Manager2D) -> DataFrame: if not self.as_index: index = np.arange(mgr.shape[1]) - mgr.set_axis(1, ibase.Index(index), verify_integrity=False) + mgr.set_axis(1, ibase.Index(index)) result = self.obj._constructor(mgr) self._insert_inaxis_grouper_inplace(result) result = result._consolidate() else: index = self.grouper.result_index - mgr.set_axis(1, index, verify_integrity=False) + mgr.set_axis(1, index) result = self.obj._constructor(mgr) if self.axis == 1: diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 71e6d14e6a716..d4dee81bf8081 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -160,21 +160,10 @@ def _normalize_axis(axis: int) -> int: axis = 1 if axis == 0 else 0 return axis - def set_axis( - self, axis: int, new_labels: Index, verify_integrity: bool = True - ) -> None: + def set_axis(self, axis: int, new_labels: Index) -> None: # Caller is responsible for ensuring we have an Index object. 
+ self._validate_set_axis(axis, new_labels) axis = self._normalize_axis(axis) - if verify_integrity: - old_len = len(self._axes[axis]) - new_len = len(new_labels) - - if new_len != old_len: - raise ValueError( - f"Length mismatch: Expected axis has {old_len} elements, new " - f"values have {new_len} elements" - ) - self._axes[axis] = new_labels def consolidate(self: T) -> T: diff --git a/pandas/core/internals/base.py b/pandas/core/internals/base.py index 3a8ff8237b62f..f8ccb10655ea1 100644 --- a/pandas/core/internals/base.py +++ b/pandas/core/internals/base.py @@ -44,6 +44,23 @@ def ndim(self) -> int: def shape(self) -> Shape: return tuple(len(ax) for ax in self.axes) + @final + def _validate_set_axis(self, axis: int, new_labels: Index) -> None: + # Caller is responsible for ensuring we have an Index object. + old_len = len(self.axes[axis]) + new_len = len(new_labels) + + if axis == 1 and len(self.items) == 0: + # If we are setting the index on a DataFrame with no columns, + # it is OK to change the length. + pass + + elif new_len != old_len: + raise ValueError( + f"Length mismatch: Expected axis has {old_len} elements, new " + f"values have {new_len} elements" + ) + def reindex_indexer( self: T, new_axis, diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 73f463997c085..836a903248f1d 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -211,20 +211,9 @@ def _normalize_axis(self, axis: int) -> int: axis = 1 if axis == 0 else 0 return axis - def set_axis( - self, axis: int, new_labels: Index, verify_integrity: bool = True - ) -> None: + def set_axis(self, axis: int, new_labels: Index) -> None: # Caller is responsible for ensuring we have an Index object. 
- if verify_integrity: - old_len = len(self.axes[axis]) - new_len = len(new_labels) - - if new_len != old_len: - raise ValueError( - f"Length mismatch: Expected axis has {old_len} elements, new " - f"values have {new_len} elements" - ) - + self._validate_set_axis(axis, new_labels) self.axes[axis] = new_labels @property
Fix the need for verify_integrity=False
https://api.github.com/repos/pandas-dev/pandas/pulls/41348
2021-05-06T04:07:04Z
2021-05-06T23:21:27Z
2021-05-06T23:21:27Z
2021-05-06T23:23:13Z
REF: move masked dispatch inside _ea_wrap_cython_operation
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index d1a46c1c36439..26b71b396f4b5 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -327,6 +327,14 @@ def _ea_wrap_cython_operation( re-wrap if appropriate. """ # TODO: general case implementation overridable by EAs. + if isinstance(values, BaseMaskedArray) and self.uses_mask(): + return self._masked_ea_wrap_cython_operation( + values, + min_count=min_count, + ngroups=ngroups, + comp_ids=comp_ids, + **kwargs, + ) orig_values = values if isinstance(orig_values, (DatetimeArray, PeriodArray)): @@ -614,22 +622,13 @@ def cython_operation( if not isinstance(values, np.ndarray): # i.e. ExtensionArray - if isinstance(values, BaseMaskedArray) and self.uses_mask(): - return self._masked_ea_wrap_cython_operation( - values, - min_count=min_count, - ngroups=ngroups, - comp_ids=comp_ids, - **kwargs, - ) - else: - return self._ea_wrap_cython_operation( - values, - min_count=min_count, - ngroups=ngroups, - comp_ids=comp_ids, - **kwargs, - ) + return self._ea_wrap_cython_operation( + values, + min_count=min_count, + ngroups=ngroups, + comp_ids=comp_ids, + **kwargs, + ) return self._cython_op_ndim_compat( values,
After the groupby cleanups, masked dispatch can be moved inside `_ea_wrap_cython_operation` without adding args. Seemed like the preferred place for it in the original pr.
https://api.github.com/repos/pandas-dev/pandas/pulls/41347
2021-05-06T03:47:47Z
2021-05-06T13:19:40Z
2021-05-06T13:19:40Z
2021-05-06T15:07:01Z
TST: catch/suppress test warnings
diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py index dbe2df5238c7e..50ecb74924e2a 100644 --- a/pandas/tests/generic/test_finalize.py +++ b/pandas/tests/generic/test_finalize.py @@ -226,7 +226,10 @@ ), pytest.param( (pd.DataFrame, frame_mi_data, operator.methodcaller("count", level="A")), - marks=not_implemented_mark, + marks=[ + not_implemented_mark, + pytest.mark.filterwarnings("ignore:Using the level keyword:FutureWarning"), + ], ), pytest.param( (pd.DataFrame, frame_data, operator.methodcaller("nunique")), diff --git a/pandas/tests/io/__init__.py b/pandas/tests/io/__init__.py index 39474dedba78c..3231e38b985af 100644 --- a/pandas/tests/io/__init__.py +++ b/pandas/tests/io/__init__.py @@ -5,6 +5,12 @@ pytest.mark.filterwarnings( "ignore:PY_SSIZE_T_CLEAN will be required.*:DeprecationWarning" ), + pytest.mark.filterwarnings( + "ignore:Block.is_categorical is deprecated:DeprecationWarning" + ), + pytest.mark.filterwarnings( + r"ignore:`np\.bool` is a deprecated alias:DeprecationWarning" + ), # xlrd pytest.mark.filterwarnings( "ignore:This method will be removed in future versions:DeprecationWarning" diff --git a/pandas/tests/io/formats/style/test_highlight.py b/pandas/tests/io/formats/style/test_highlight.py index 9e956e055d1aa..a681d7c65a190 100644 --- a/pandas/tests/io/formats/style/test_highlight.py +++ b/pandas/tests/io/formats/style/test_highlight.py @@ -5,6 +5,7 @@ DataFrame, IndexSlice, ) +import pandas._testing as tm pytest.importorskip("jinja2") @@ -54,7 +55,9 @@ def test_highlight_minmax_basic(df, f): } if f == "highlight_min": df = -df - result = getattr(df.style, f)(axis=1, color="red")._compute().ctx + with tm.assert_produces_warning(RuntimeWarning): + # All-NaN slice encountered + result = getattr(df.style, f)(axis=1, color="red")._compute().ctx assert result == expected diff --git a/pandas/tests/io/pytables/__init__.py b/pandas/tests/io/pytables/__init__.py index d3735f8863c3b..cbf848a401dc4 100644 --- 
a/pandas/tests/io/pytables/__init__.py +++ b/pandas/tests/io/pytables/__init__.py @@ -9,4 +9,7 @@ pytest.mark.filterwarnings( r"ignore:`np\.object` is a deprecated alias:DeprecationWarning" ), + pytest.mark.filterwarnings( + r"ignore:`np\.bool` is a deprecated alias:DeprecationWarning" + ), ] diff --git a/pandas/tests/io/pytables/test_categorical.py b/pandas/tests/io/pytables/test_categorical.py index 4928a70f90960..0b3d56ebf959e 100644 --- a/pandas/tests/io/pytables/test_categorical.py +++ b/pandas/tests/io/pytables/test_categorical.py @@ -216,7 +216,7 @@ def test_convert_value(setup_path, where: str, df: DataFrame, expected: DataFram max_widths = {"col": 1} categorical_values = sorted(df.col.unique()) expected.col = expected.col.astype("category") - expected.col.cat.set_categories(categorical_values, inplace=True) + expected.col = expected.col.cat.set_categories(categorical_values) with ensure_clean_path(setup_path) as path: df.to_hdf(path, "df", format="table", min_itemsize=max_widths) diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index 30666a716859a..f66451cd72309 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -3,7 +3,10 @@ from io import BytesIO import os import pathlib -from warnings import catch_warnings +from warnings import ( + catch_warnings, + filterwarnings, +) import numpy as np import pytest @@ -36,7 +39,10 @@ _HAVE_PYARROW = False try: - import fastparquet + with catch_warnings(): + # `np.bool` is a deprecated alias... + filterwarnings("ignore", "`np.bool`", category=DeprecationWarning) + import fastparquet _HAVE_FASTPARQUET = True except ImportError:
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41346
2021-05-06T03:13:11Z
2021-05-06T13:19:05Z
2021-05-06T13:19:05Z
2021-05-06T15:13:02Z
TST/CLN: remove frame replace class
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index e6ed60dc2bb08..a1f2665774c35 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -33,1659 +33,1704 @@ def mix_abc() -> Dict[str, List[Union[float, str]]]: return {"a": list(range(4)), "b": list("ab.."), "c": ["a", "b", np.nan, "d"]} -class TestDataFrameReplace: - def test_replace_inplace(self, datetime_frame, float_string_frame): - datetime_frame["A"][:5] = np.nan - datetime_frame["A"][-5:] = np.nan - - tsframe = datetime_frame.copy() - return_value = tsframe.replace(np.nan, 0, inplace=True) - assert return_value is None - tm.assert_frame_equal(tsframe, datetime_frame.fillna(0)) - - # mixed type - mf = float_string_frame - mf.iloc[5:20, mf.columns.get_loc("foo")] = np.nan - mf.iloc[-10:, mf.columns.get_loc("A")] = np.nan - - result = float_string_frame.replace(np.nan, 0) - expected = float_string_frame.fillna(value=0) - tm.assert_frame_equal(result, expected) +def test_replace_inplace(datetime_frame, float_string_frame): + datetime_frame["A"][:5] = np.nan + datetime_frame["A"][-5:] = np.nan + + tsframe = datetime_frame.copy() + return_value = tsframe.replace(np.nan, 0, inplace=True) + assert return_value is None + tm.assert_frame_equal(tsframe, datetime_frame.fillna(0)) + + # mixed type + mf = float_string_frame + mf.iloc[5:20, mf.columns.get_loc("foo")] = np.nan + mf.iloc[-10:, mf.columns.get_loc("A")] = np.nan + + result = float_string_frame.replace(np.nan, 0) + expected = float_string_frame.fillna(value=0) + tm.assert_frame_equal(result, expected) + + tsframe = datetime_frame.copy() + return_value = tsframe.replace([np.nan], [0], inplace=True) + assert return_value is None + tm.assert_frame_equal(tsframe, datetime_frame.fillna(0)) + + +def test_regex_replace_scalar(mix_ab): + obj = {"a": list("ab.."), "b": list("efgh")} + dfobj = DataFrame(obj) + dfmix = DataFrame(mix_ab) + + # simplest cases + # regex 
-> value + # obj frame + res = dfobj.replace(r"\s*\.\s*", np.nan, regex=True) + tm.assert_frame_equal(dfobj, res.fillna(".")) + + # mixed + res = dfmix.replace(r"\s*\.\s*", np.nan, regex=True) + tm.assert_frame_equal(dfmix, res.fillna(".")) + + # regex -> regex + # obj frame + res = dfobj.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True) + objc = obj.copy() + objc["a"] = ["a", "b", "...", "..."] + expec = DataFrame(objc) + tm.assert_frame_equal(res, expec) + + # with mixed + res = dfmix.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True) + mixc = mix_ab.copy() + mixc["b"] = ["a", "b", "...", "..."] + expec = DataFrame(mixc) + tm.assert_frame_equal(res, expec) + + # everything with compiled regexs as well + res = dfobj.replace(re.compile(r"\s*\.\s*"), np.nan, regex=True) + tm.assert_frame_equal(dfobj, res.fillna(".")) + + # mixed + res = dfmix.replace(re.compile(r"\s*\.\s*"), np.nan, regex=True) + tm.assert_frame_equal(dfmix, res.fillna(".")) + + # regex -> regex + # obj frame + res = dfobj.replace(re.compile(r"\s*(\.)\s*"), r"\1\1\1") + objc = obj.copy() + objc["a"] = ["a", "b", "...", "..."] + expec = DataFrame(objc) + tm.assert_frame_equal(res, expec) + + # with mixed + res = dfmix.replace(re.compile(r"\s*(\.)\s*"), r"\1\1\1") + mixc = mix_ab.copy() + mixc["b"] = ["a", "b", "...", "..."] + expec = DataFrame(mixc) + tm.assert_frame_equal(res, expec) + + res = dfmix.replace(regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1") + mixc = mix_ab.copy() + mixc["b"] = ["a", "b", "...", "..."] + expec = DataFrame(mixc) + tm.assert_frame_equal(res, expec) + + res = dfmix.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1") + mixc = mix_ab.copy() + mixc["b"] = ["a", "b", "...", "..."] + expec = DataFrame(mixc) + tm.assert_frame_equal(res, expec) + + +def test_regex_replace_scalar_inplace(mix_ab): + obj = {"a": list("ab.."), "b": list("efgh")} + dfobj = DataFrame(obj) + dfmix = DataFrame(mix_ab) + + # simplest cases + # regex -> value + # obj frame + res = dfobj.copy() + return_value = 
res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True) + assert return_value is None + tm.assert_frame_equal(dfobj, res.fillna(".")) + + # mixed + res = dfmix.copy() + return_value = res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True) + assert return_value is None + tm.assert_frame_equal(dfmix, res.fillna(".")) + + # regex -> regex + # obj frame + res = dfobj.copy() + return_value = res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True) + assert return_value is None + objc = obj.copy() + objc["a"] = ["a", "b", "...", "..."] + expec = DataFrame(objc) + tm.assert_frame_equal(res, expec) + + # with mixed + res = dfmix.copy() + return_value = res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True) + assert return_value is None + mixc = mix_ab.copy() + mixc["b"] = ["a", "b", "...", "..."] + expec = DataFrame(mixc) + tm.assert_frame_equal(res, expec) + + # everything with compiled regexs as well + res = dfobj.copy() + return_value = res.replace( + re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True + ) + assert return_value is None + tm.assert_frame_equal(dfobj, res.fillna(".")) - tsframe = datetime_frame.copy() - return_value = tsframe.replace([np.nan], [0], inplace=True) - assert return_value is None - tm.assert_frame_equal(tsframe, datetime_frame.fillna(0)) - - def test_regex_replace_scalar(self, mix_ab): - obj = {"a": list("ab.."), "b": list("efgh")} - dfobj = DataFrame(obj) - dfmix = DataFrame(mix_ab) - - # simplest cases - # regex -> value - # obj frame - res = dfobj.replace(r"\s*\.\s*", np.nan, regex=True) - tm.assert_frame_equal(dfobj, res.fillna(".")) - - # mixed - res = dfmix.replace(r"\s*\.\s*", np.nan, regex=True) - tm.assert_frame_equal(dfmix, res.fillna(".")) - - # regex -> regex - # obj frame - res = dfobj.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True) - objc = obj.copy() - objc["a"] = ["a", "b", "...", "..."] - expec = DataFrame(objc) - tm.assert_frame_equal(res, expec) - - # with mixed - res = dfmix.replace(r"\s*(\.)\s*", 
r"\1\1\1", regex=True) - mixc = mix_ab.copy() - mixc["b"] = ["a", "b", "...", "..."] - expec = DataFrame(mixc) - tm.assert_frame_equal(res, expec) - - # everything with compiled regexs as well - res = dfobj.replace(re.compile(r"\s*\.\s*"), np.nan, regex=True) - tm.assert_frame_equal(dfobj, res.fillna(".")) - - # mixed - res = dfmix.replace(re.compile(r"\s*\.\s*"), np.nan, regex=True) - tm.assert_frame_equal(dfmix, res.fillna(".")) - - # regex -> regex - # obj frame - res = dfobj.replace(re.compile(r"\s*(\.)\s*"), r"\1\1\1") - objc = obj.copy() - objc["a"] = ["a", "b", "...", "..."] - expec = DataFrame(objc) - tm.assert_frame_equal(res, expec) - - # with mixed - res = dfmix.replace(re.compile(r"\s*(\.)\s*"), r"\1\1\1") - mixc = mix_ab.copy() - mixc["b"] = ["a", "b", "...", "..."] - expec = DataFrame(mixc) - tm.assert_frame_equal(res, expec) - - res = dfmix.replace(regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1") - mixc = mix_ab.copy() - mixc["b"] = ["a", "b", "...", "..."] - expec = DataFrame(mixc) - tm.assert_frame_equal(res, expec) - - res = dfmix.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1") - mixc = mix_ab.copy() - mixc["b"] = ["a", "b", "...", "..."] - expec = DataFrame(mixc) - tm.assert_frame_equal(res, expec) - - def test_regex_replace_scalar_inplace(self, mix_ab): - obj = {"a": list("ab.."), "b": list("efgh")} - dfobj = DataFrame(obj) - dfmix = DataFrame(mix_ab) - - # simplest cases - # regex -> value - # obj frame - res = dfobj.copy() - return_value = res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True) - assert return_value is None - tm.assert_frame_equal(dfobj, res.fillna(".")) + # mixed + res = dfmix.copy() + return_value = res.replace( + re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True + ) + assert return_value is None + tm.assert_frame_equal(dfmix, res.fillna(".")) + + # regex -> regex + # obj frame + res = dfobj.copy() + return_value = res.replace( + re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True + ) + assert 
return_value is None + objc = obj.copy() + objc["a"] = ["a", "b", "...", "..."] + expec = DataFrame(objc) + tm.assert_frame_equal(res, expec) + + # with mixed + res = dfmix.copy() + return_value = res.replace( + re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True + ) + assert return_value is None + mixc = mix_ab.copy() + mixc["b"] = ["a", "b", "...", "..."] + expec = DataFrame(mixc) + tm.assert_frame_equal(res, expec) + + res = dfobj.copy() + return_value = res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True) + assert return_value is None + tm.assert_frame_equal(dfobj, res.fillna(".")) + + # mixed + res = dfmix.copy() + return_value = res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True) + assert return_value is None + tm.assert_frame_equal(dfmix, res.fillna(".")) + + # regex -> regex + # obj frame + res = dfobj.copy() + return_value = res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True) + assert return_value is None + objc = obj.copy() + objc["a"] = ["a", "b", "...", "..."] + expec = DataFrame(objc) + tm.assert_frame_equal(res, expec) + + # with mixed + res = dfmix.copy() + return_value = res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True) + assert return_value is None + mixc = mix_ab.copy() + mixc["b"] = ["a", "b", "...", "..."] + expec = DataFrame(mixc) + tm.assert_frame_equal(res, expec) + + # everything with compiled regexs as well + res = dfobj.copy() + return_value = res.replace( + regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True + ) + assert return_value is None + tm.assert_frame_equal(dfobj, res.fillna(".")) - # mixed - res = dfmix.copy() - return_value = res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True) - assert return_value is None - tm.assert_frame_equal(dfmix, res.fillna(".")) + # mixed + res = dfmix.copy() + return_value = res.replace( + regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True + ) + assert return_value is None + tm.assert_frame_equal(dfmix, res.fillna(".")) + + # regex -> 
regex + # obj frame + res = dfobj.copy() + return_value = res.replace( + regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True + ) + assert return_value is None + objc = obj.copy() + objc["a"] = ["a", "b", "...", "..."] + expec = DataFrame(objc) + tm.assert_frame_equal(res, expec) + + # with mixed + res = dfmix.copy() + return_value = res.replace( + regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True + ) + assert return_value is None + mixc = mix_ab.copy() + mixc["b"] = ["a", "b", "...", "..."] + expec = DataFrame(mixc) + tm.assert_frame_equal(res, expec) + + +def test_regex_replace_list_obj(): + obj = {"a": list("ab.."), "b": list("efgh"), "c": list("helo")} + dfobj = DataFrame(obj) + + # lists of regexes and values + # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN] + to_replace_res = [r"\s*\.\s*", r"e|f|g"] + values = [np.nan, "crap"] + res = dfobj.replace(to_replace_res, values, regex=True) + expec = DataFrame( + { + "a": ["a", "b", np.nan, np.nan], + "b": ["crap"] * 3 + ["h"], + "c": ["h", "crap", "l", "o"], + } + ) + tm.assert_frame_equal(res, expec) + + # list of [re1, re2, ..., reN] -> [re1, re2, .., reN] + to_replace_res = [r"\s*(\.)\s*", r"(e|f|g)"] + values = [r"\1\1", r"\1_crap"] + res = dfobj.replace(to_replace_res, values, regex=True) + expec = DataFrame( + { + "a": ["a", "b", "..", ".."], + "b": ["e_crap", "f_crap", "g_crap", "h"], + "c": ["h", "e_crap", "l", "o"], + } + ) + tm.assert_frame_equal(res, expec) + + # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN + # or vN)] + to_replace_res = [r"\s*(\.)\s*", r"e"] + values = [r"\1\1", r"crap"] + res = dfobj.replace(to_replace_res, values, regex=True) + expec = DataFrame( + { + "a": ["a", "b", "..", ".."], + "b": ["crap", "f", "g", "h"], + "c": ["h", "crap", "l", "o"], + } + ) + tm.assert_frame_equal(res, expec) + + to_replace_res = [r"\s*(\.)\s*", r"e"] + values = [r"\1\1", r"crap"] + res = dfobj.replace(value=values, regex=to_replace_res) + expec = 
DataFrame( + { + "a": ["a", "b", "..", ".."], + "b": ["crap", "f", "g", "h"], + "c": ["h", "crap", "l", "o"], + } + ) + tm.assert_frame_equal(res, expec) + + +def test_regex_replace_list_obj_inplace(): + # same as above with inplace=True + # lists of regexes and values + obj = {"a": list("ab.."), "b": list("efgh"), "c": list("helo")} + dfobj = DataFrame(obj) + + # lists of regexes and values + # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN] + to_replace_res = [r"\s*\.\s*", r"e|f|g"] + values = [np.nan, "crap"] + res = dfobj.copy() + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None + expec = DataFrame( + { + "a": ["a", "b", np.nan, np.nan], + "b": ["crap"] * 3 + ["h"], + "c": ["h", "crap", "l", "o"], + } + ) + tm.assert_frame_equal(res, expec) + + # list of [re1, re2, ..., reN] -> [re1, re2, .., reN] + to_replace_res = [r"\s*(\.)\s*", r"(e|f|g)"] + values = [r"\1\1", r"\1_crap"] + res = dfobj.copy() + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None + expec = DataFrame( + { + "a": ["a", "b", "..", ".."], + "b": ["e_crap", "f_crap", "g_crap", "h"], + "c": ["h", "e_crap", "l", "o"], + } + ) + tm.assert_frame_equal(res, expec) + + # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN + # or vN)] + to_replace_res = [r"\s*(\.)\s*", r"e"] + values = [r"\1\1", r"crap"] + res = dfobj.copy() + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None + expec = DataFrame( + { + "a": ["a", "b", "..", ".."], + "b": ["crap", "f", "g", "h"], + "c": ["h", "crap", "l", "o"], + } + ) + tm.assert_frame_equal(res, expec) + + to_replace_res = [r"\s*(\.)\s*", r"e"] + values = [r"\1\1", r"crap"] + res = dfobj.copy() + return_value = res.replace(value=values, regex=to_replace_res, inplace=True) + assert return_value is None + expec = DataFrame( + { + "a": ["a", "b", "..", ".."], + "b": ["crap", "f", "g", 
"h"], + "c": ["h", "crap", "l", "o"], + } + ) + tm.assert_frame_equal(res, expec) + + +def test_regex_replace_list_mixed(mix_ab): + # mixed frame to make sure this doesn't break things + dfmix = DataFrame(mix_ab) + + # lists of regexes and values + # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN] + to_replace_res = [r"\s*\.\s*", r"a"] + values = [np.nan, "crap"] + mix2 = {"a": list(range(4)), "b": list("ab.."), "c": list("halo")} + dfmix2 = DataFrame(mix2) + res = dfmix2.replace(to_replace_res, values, regex=True) + expec = DataFrame( + { + "a": mix2["a"], + "b": ["crap", "b", np.nan, np.nan], + "c": ["h", "crap", "l", "o"], + } + ) + tm.assert_frame_equal(res, expec) + + # list of [re1, re2, ..., reN] -> [re1, re2, .., reN] + to_replace_res = [r"\s*(\.)\s*", r"(a|b)"] + values = [r"\1\1", r"\1_crap"] + res = dfmix.replace(to_replace_res, values, regex=True) + expec = DataFrame({"a": mix_ab["a"], "b": ["a_crap", "b_crap", "..", ".."]}) + tm.assert_frame_equal(res, expec) + + # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN + # or vN)] + to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"] + values = [r"\1\1", r"crap", r"\1_crap"] + res = dfmix.replace(to_replace_res, values, regex=True) + expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]}) + tm.assert_frame_equal(res, expec) + + to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"] + values = [r"\1\1", r"crap", r"\1_crap"] + res = dfmix.replace(regex=to_replace_res, value=values) + expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]}) + tm.assert_frame_equal(res, expec) + + +def test_regex_replace_list_mixed_inplace(mix_ab): + dfmix = DataFrame(mix_ab) + # the same inplace + # lists of regexes and values + # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN] + to_replace_res = [r"\s*\.\s*", r"a"] + values = [np.nan, "crap"] + res = dfmix.copy() + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None + expec = 
DataFrame({"a": mix_ab["a"], "b": ["crap", "b", np.nan, np.nan]}) + tm.assert_frame_equal(res, expec) + + # list of [re1, re2, ..., reN] -> [re1, re2, .., reN] + to_replace_res = [r"\s*(\.)\s*", r"(a|b)"] + values = [r"\1\1", r"\1_crap"] + res = dfmix.copy() + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None + expec = DataFrame({"a": mix_ab["a"], "b": ["a_crap", "b_crap", "..", ".."]}) + tm.assert_frame_equal(res, expec) + + # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN + # or vN)] + to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"] + values = [r"\1\1", r"crap", r"\1_crap"] + res = dfmix.copy() + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None + expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]}) + tm.assert_frame_equal(res, expec) + + to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"] + values = [r"\1\1", r"crap", r"\1_crap"] + res = dfmix.copy() + return_value = res.replace(regex=to_replace_res, value=values, inplace=True) + assert return_value is None + expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]}) + tm.assert_frame_equal(res, expec) + + +def test_regex_replace_dict_mixed(mix_abc): + dfmix = DataFrame(mix_abc) + + # dicts + # single dict {re1: v1}, search the whole frame + # need test for this... 
+ + # list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole + # frame + res = dfmix.replace({"b": r"\s*\.\s*"}, {"b": np.nan}, regex=True) + res2 = dfmix.copy() + return_value = res2.replace( + {"b": r"\s*\.\s*"}, {"b": np.nan}, inplace=True, regex=True + ) + assert return_value is None + expec = DataFrame( + {"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]} + ) + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) + + # list of dicts {re1: re11, re2: re12, ..., reN: re1N}, search the + # whole frame + res = dfmix.replace({"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, regex=True) + res2 = dfmix.copy() + return_value = res2.replace( + {"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, inplace=True, regex=True + ) + assert return_value is None + expec = DataFrame( + {"a": mix_abc["a"], "b": ["a", "b", ".ty", ".ty"], "c": mix_abc["c"]} + ) + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) - # regex -> regex - # obj frame - res = dfobj.copy() - return_value = res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True) - assert return_value is None - objc = obj.copy() - objc["a"] = ["a", "b", "...", "..."] - expec = DataFrame(objc) - tm.assert_frame_equal(res, expec) - - # with mixed - res = dfmix.copy() - return_value = res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True) - assert return_value is None - mixc = mix_ab.copy() - mixc["b"] = ["a", "b", "...", "..."] - expec = DataFrame(mixc) - tm.assert_frame_equal(res, expec) - - # everything with compiled regexs as well - res = dfobj.copy() - return_value = res.replace( - re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True - ) - assert return_value is None - tm.assert_frame_equal(dfobj, res.fillna(".")) + res = dfmix.replace(regex={"b": r"\s*(\.)\s*"}, value={"b": r"\1ty"}) + res2 = dfmix.copy() + return_value = res2.replace( + regex={"b": r"\s*(\.)\s*"}, value={"b": r"\1ty"}, inplace=True + ) + assert return_value is None + expec = DataFrame( + 
{"a": mix_abc["a"], "b": ["a", "b", ".ty", ".ty"], "c": mix_abc["c"]} + ) + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) - # mixed - res = dfmix.copy() - return_value = res.replace( - re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True - ) - assert return_value is None - tm.assert_frame_equal(dfmix, res.fillna(".")) + # scalar -> dict + # to_replace regex, {value: value} + expec = DataFrame( + {"a": mix_abc["a"], "b": [np.nan, "b", ".", "."], "c": mix_abc["c"]} + ) + res = dfmix.replace("a", {"b": np.nan}, regex=True) + res2 = dfmix.copy() + return_value = res2.replace("a", {"b": np.nan}, regex=True, inplace=True) + assert return_value is None + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) + + res = dfmix.replace("a", {"b": np.nan}, regex=True) + res2 = dfmix.copy() + return_value = res2.replace(regex="a", value={"b": np.nan}, inplace=True) + assert return_value is None + expec = DataFrame( + {"a": mix_abc["a"], "b": [np.nan, "b", ".", "."], "c": mix_abc["c"]} + ) + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) + + +def test_regex_replace_dict_nested(mix_abc): + # nested dicts will not work until this is implemented for Series + dfmix = DataFrame(mix_abc) + res = dfmix.replace({"b": {r"\s*\.\s*": np.nan}}, regex=True) + res2 = dfmix.copy() + res4 = dfmix.copy() + return_value = res2.replace({"b": {r"\s*\.\s*": np.nan}}, inplace=True, regex=True) + assert return_value is None + res3 = dfmix.replace(regex={"b": {r"\s*\.\s*": np.nan}}) + return_value = res4.replace(regex={"b": {r"\s*\.\s*": np.nan}}, inplace=True) + assert return_value is None + expec = DataFrame( + {"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]} + ) + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) + tm.assert_frame_equal(res3, expec) + tm.assert_frame_equal(res4, expec) + + +def test_regex_replace_dict_nested_non_first_character(): + # GH 25259 + df = DataFrame({"first": 
["abc", "bca", "cab"]}) + expected = DataFrame({"first": [".bc", "bc.", "c.b"]}) + result = df.replace({"a": "."}, regex=True) + tm.assert_frame_equal(result, expected) + + +def test_regex_replace_dict_nested_gh4115(): + df = DataFrame({"Type": ["Q", "T", "Q", "Q", "T"], "tmp": 2}) + expected = DataFrame({"Type": [0, 1, 0, 0, 1], "tmp": 2}) + result = df.replace({"Type": {"Q": 0, "T": 1}}) + tm.assert_frame_equal(result, expected) + + +def test_regex_replace_list_to_scalar(mix_abc): + df = DataFrame(mix_abc) + expec = DataFrame( + { + "a": mix_abc["a"], + "b": np.array([np.nan] * 4), + "c": [np.nan, np.nan, np.nan, "d"], + } + ) + res = df.replace([r"\s*\.\s*", "a|b"], np.nan, regex=True) + res2 = df.copy() + res3 = df.copy() + return_value = res2.replace([r"\s*\.\s*", "a|b"], np.nan, regex=True, inplace=True) + assert return_value is None + return_value = res3.replace(regex=[r"\s*\.\s*", "a|b"], value=np.nan, inplace=True) + assert return_value is None + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) + tm.assert_frame_equal(res3, expec) + + +def test_regex_replace_str_to_numeric(mix_abc): + # what happens when you try to replace a numeric value with a regex? 
+ df = DataFrame(mix_abc) + res = df.replace(r"\s*\.\s*", 0, regex=True) + res2 = df.copy() + return_value = res2.replace(r"\s*\.\s*", 0, inplace=True, regex=True) + assert return_value is None + res3 = df.copy() + return_value = res3.replace(regex=r"\s*\.\s*", value=0, inplace=True) + assert return_value is None + expec = DataFrame({"a": mix_abc["a"], "b": ["a", "b", 0, 0], "c": mix_abc["c"]}) + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) + tm.assert_frame_equal(res3, expec) + + +def test_regex_replace_regex_list_to_numeric(mix_abc): + df = DataFrame(mix_abc) + res = df.replace([r"\s*\.\s*", "b"], 0, regex=True) + res2 = df.copy() + return_value = res2.replace([r"\s*\.\s*", "b"], 0, regex=True, inplace=True) + assert return_value is None + res3 = df.copy() + return_value = res3.replace(regex=[r"\s*\.\s*", "b"], value=0, inplace=True) + assert return_value is None + expec = DataFrame( + {"a": mix_abc["a"], "b": ["a", 0, 0, 0], "c": ["a", 0, np.nan, "d"]} + ) + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) + tm.assert_frame_equal(res3, expec) + + +def test_regex_replace_series_of_regexes(mix_abc): + df = DataFrame(mix_abc) + s1 = Series({"b": r"\s*\.\s*"}) + s2 = Series({"b": np.nan}) + res = df.replace(s1, s2, regex=True) + res2 = df.copy() + return_value = res2.replace(s1, s2, inplace=True, regex=True) + assert return_value is None + res3 = df.copy() + return_value = res3.replace(regex=s1, value=s2, inplace=True) + assert return_value is None + expec = DataFrame( + {"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]} + ) + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) + tm.assert_frame_equal(res3, expec) + + +def test_regex_replace_numeric_to_object_conversion(mix_abc): + df = DataFrame(mix_abc) + expec = DataFrame({"a": ["a", 1, 2, 3], "b": mix_abc["b"], "c": mix_abc["c"]}) + res = df.replace(0, "a") + tm.assert_frame_equal(res, expec) + assert res.a.dtype == 
np.object_ + + +@pytest.mark.parametrize("to_replace", [{"": np.nan, ",": ""}, {",": "", "": np.nan}]) +def test_joint_simple_replace_and_regex_replace(to_replace): + # GH-39338 + df = DataFrame( + { + "col1": ["1,000", "a", "3"], + "col2": ["a", "", "b"], + "col3": ["a", "b", "c"], + } + ) + result = df.replace(regex=to_replace) + expected = DataFrame( + { + "col1": ["1000", "a", "3"], + "col2": ["a", np.nan, "b"], + "col3": ["a", "b", "c"], + } + ) + tm.assert_frame_equal(result, expected) - # regex -> regex - # obj frame - res = dfobj.copy() - return_value = res.replace( - re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True - ) - assert return_value is None - objc = obj.copy() - objc["a"] = ["a", "b", "...", "..."] - expec = DataFrame(objc) - tm.assert_frame_equal(res, expec) - - # with mixed - res = dfmix.copy() - return_value = res.replace( - re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True - ) - assert return_value is None - mixc = mix_ab.copy() - mixc["b"] = ["a", "b", "...", "..."] - expec = DataFrame(mixc) - tm.assert_frame_equal(res, expec) - res = dfobj.copy() - return_value = res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True) - assert return_value is None - tm.assert_frame_equal(dfobj, res.fillna(".")) +@pytest.mark.parametrize("metachar", ["[]", "()", r"\d", r"\w", r"\s"]) +def test_replace_regex_metachar(metachar): + df = DataFrame({"a": [metachar, "else"]}) + result = df.replace({"a": {metachar: "paren"}}) + expected = DataFrame({"a": ["paren", "else"]}) + tm.assert_frame_equal(result, expected) - # mixed - res = dfmix.copy() - return_value = res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True) - assert return_value is None - tm.assert_frame_equal(dfmix, res.fillna(".")) - # regex -> regex - # obj frame - res = dfobj.copy() - return_value = res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True) - assert return_value is None - objc = obj.copy() - objc["a"] = ["a", "b", "...", "..."] - expec = 
DataFrame(objc) - tm.assert_frame_equal(res, expec) - - # with mixed - res = dfmix.copy() - return_value = res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True) - assert return_value is None - mixc = mix_ab.copy() - mixc["b"] = ["a", "b", "...", "..."] - expec = DataFrame(mixc) - tm.assert_frame_equal(res, expec) - - # everything with compiled regexs as well - res = dfobj.copy() - return_value = res.replace( - regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True - ) - assert return_value is None - tm.assert_frame_equal(dfobj, res.fillna(".")) +def test_replace(datetime_frame): + datetime_frame["A"][:5] = np.nan + datetime_frame["A"][-5:] = np.nan - # mixed - res = dfmix.copy() - return_value = res.replace( - regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True - ) - assert return_value is None - tm.assert_frame_equal(dfmix, res.fillna(".")) + zero_filled = datetime_frame.replace(np.nan, -1e8) + tm.assert_frame_equal(zero_filled, datetime_frame.fillna(-1e8)) + tm.assert_frame_equal(zero_filled.replace(-1e8, np.nan), datetime_frame) - # regex -> regex - # obj frame - res = dfobj.copy() - return_value = res.replace( - regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True - ) - assert return_value is None - objc = obj.copy() - objc["a"] = ["a", "b", "...", "..."] - expec = DataFrame(objc) - tm.assert_frame_equal(res, expec) - - # with mixed - res = dfmix.copy() - return_value = res.replace( - regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True - ) - assert return_value is None - mixc = mix_ab.copy() - mixc["b"] = ["a", "b", "...", "..."] - expec = DataFrame(mixc) - tm.assert_frame_equal(res, expec) - - def test_regex_replace_list_obj(self): - obj = {"a": list("ab.."), "b": list("efgh"), "c": list("helo")} - dfobj = DataFrame(obj) - - # lists of regexes and values - # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN] - to_replace_res = [r"\s*\.\s*", r"e|f|g"] - values = [np.nan, "crap"] - res = dfobj.replace(to_replace_res, 
values, regex=True) - expec = DataFrame( - { - "a": ["a", "b", np.nan, np.nan], - "b": ["crap"] * 3 + ["h"], - "c": ["h", "crap", "l", "o"], - } - ) - tm.assert_frame_equal(res, expec) - - # list of [re1, re2, ..., reN] -> [re1, re2, .., reN] - to_replace_res = [r"\s*(\.)\s*", r"(e|f|g)"] - values = [r"\1\1", r"\1_crap"] - res = dfobj.replace(to_replace_res, values, regex=True) - expec = DataFrame( - { - "a": ["a", "b", "..", ".."], - "b": ["e_crap", "f_crap", "g_crap", "h"], - "c": ["h", "e_crap", "l", "o"], - } - ) - tm.assert_frame_equal(res, expec) - - # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN - # or vN)] - to_replace_res = [r"\s*(\.)\s*", r"e"] - values = [r"\1\1", r"crap"] - res = dfobj.replace(to_replace_res, values, regex=True) - expec = DataFrame( - { - "a": ["a", "b", "..", ".."], - "b": ["crap", "f", "g", "h"], - "c": ["h", "crap", "l", "o"], - } - ) - tm.assert_frame_equal(res, expec) - - to_replace_res = [r"\s*(\.)\s*", r"e"] - values = [r"\1\1", r"crap"] - res = dfobj.replace(value=values, regex=to_replace_res) - expec = DataFrame( - { - "a": ["a", "b", "..", ".."], - "b": ["crap", "f", "g", "h"], - "c": ["h", "crap", "l", "o"], - } - ) - tm.assert_frame_equal(res, expec) - - def test_regex_replace_list_obj_inplace(self): - # same as above with inplace=True - # lists of regexes and values - obj = {"a": list("ab.."), "b": list("efgh"), "c": list("helo")} - dfobj = DataFrame(obj) - - # lists of regexes and values - # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN] - to_replace_res = [r"\s*\.\s*", r"e|f|g"] - values = [np.nan, "crap"] - res = dfobj.copy() - return_value = res.replace(to_replace_res, values, inplace=True, regex=True) - assert return_value is None - expec = DataFrame( - { - "a": ["a", "b", np.nan, np.nan], - "b": ["crap"] * 3 + ["h"], - "c": ["h", "crap", "l", "o"], - } - ) - tm.assert_frame_equal(res, expec) + datetime_frame["A"][:5] = np.nan + datetime_frame["A"][-5:] = np.nan + datetime_frame["B"][:5] = 
-1e8 - # list of [re1, re2, ..., reN] -> [re1, re2, .., reN] - to_replace_res = [r"\s*(\.)\s*", r"(e|f|g)"] - values = [r"\1\1", r"\1_crap"] - res = dfobj.copy() - return_value = res.replace(to_replace_res, values, inplace=True, regex=True) - assert return_value is None - expec = DataFrame( - { - "a": ["a", "b", "..", ".."], - "b": ["e_crap", "f_crap", "g_crap", "h"], - "c": ["h", "e_crap", "l", "o"], - } - ) - tm.assert_frame_equal(res, expec) - - # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN - # or vN)] - to_replace_res = [r"\s*(\.)\s*", r"e"] - values = [r"\1\1", r"crap"] - res = dfobj.copy() - return_value = res.replace(to_replace_res, values, inplace=True, regex=True) - assert return_value is None - expec = DataFrame( - { - "a": ["a", "b", "..", ".."], - "b": ["crap", "f", "g", "h"], - "c": ["h", "crap", "l", "o"], - } - ) - tm.assert_frame_equal(res, expec) + # empty + df = DataFrame(index=["a", "b"]) + tm.assert_frame_equal(df, df.replace(5, 7)) - to_replace_res = [r"\s*(\.)\s*", r"e"] - values = [r"\1\1", r"crap"] - res = dfobj.copy() - return_value = res.replace(value=values, regex=to_replace_res, inplace=True) - assert return_value is None - expec = DataFrame( - { - "a": ["a", "b", "..", ".."], - "b": ["crap", "f", "g", "h"], - "c": ["h", "crap", "l", "o"], - } - ) - tm.assert_frame_equal(res, expec) - - def test_regex_replace_list_mixed(self, mix_ab): - # mixed frame to make sure this doesn't break things - dfmix = DataFrame(mix_ab) - - # lists of regexes and values - # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN] - to_replace_res = [r"\s*\.\s*", r"a"] - values = [np.nan, "crap"] - mix2 = {"a": list(range(4)), "b": list("ab.."), "c": list("halo")} - dfmix2 = DataFrame(mix2) - res = dfmix2.replace(to_replace_res, values, regex=True) - expec = DataFrame( - { - "a": mix2["a"], - "b": ["crap", "b", np.nan, np.nan], - "c": ["h", "crap", "l", "o"], - } - ) - tm.assert_frame_equal(res, expec) - - # list of [re1, re2, ..., reN] -> 
[re1, re2, .., reN] - to_replace_res = [r"\s*(\.)\s*", r"(a|b)"] - values = [r"\1\1", r"\1_crap"] - res = dfmix.replace(to_replace_res, values, regex=True) - expec = DataFrame({"a": mix_ab["a"], "b": ["a_crap", "b_crap", "..", ".."]}) - tm.assert_frame_equal(res, expec) - - # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN - # or vN)] - to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"] - values = [r"\1\1", r"crap", r"\1_crap"] - res = dfmix.replace(to_replace_res, values, regex=True) - expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]}) - tm.assert_frame_equal(res, expec) - - to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"] - values = [r"\1\1", r"crap", r"\1_crap"] - res = dfmix.replace(regex=to_replace_res, value=values) - expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]}) - tm.assert_frame_equal(res, expec) - - def test_regex_replace_list_mixed_inplace(self, mix_ab): - dfmix = DataFrame(mix_ab) - # the same inplace - # lists of regexes and values - # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN] - to_replace_res = [r"\s*\.\s*", r"a"] - values = [np.nan, "crap"] - res = dfmix.copy() - return_value = res.replace(to_replace_res, values, inplace=True, regex=True) - assert return_value is None - expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b", np.nan, np.nan]}) - tm.assert_frame_equal(res, expec) - - # list of [re1, re2, ..., reN] -> [re1, re2, .., reN] - to_replace_res = [r"\s*(\.)\s*", r"(a|b)"] - values = [r"\1\1", r"\1_crap"] - res = dfmix.copy() - return_value = res.replace(to_replace_res, values, inplace=True, regex=True) - assert return_value is None - expec = DataFrame({"a": mix_ab["a"], "b": ["a_crap", "b_crap", "..", ".."]}) - tm.assert_frame_equal(res, expec) - - # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN - # or vN)] - to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"] - values = [r"\1\1", r"crap", r"\1_crap"] - res = dfmix.copy() - return_value = 
res.replace(to_replace_res, values, inplace=True, regex=True) - assert return_value is None - expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]}) - tm.assert_frame_equal(res, expec) + # GH 11698 + # test for mixed data types. + df = DataFrame( + [("-", pd.to_datetime("20150101")), ("a", pd.to_datetime("20150102"))] + ) + df1 = df.replace("-", np.nan) + expected_df = DataFrame( + [(np.nan, pd.to_datetime("20150101")), ("a", pd.to_datetime("20150102"))] + ) + tm.assert_frame_equal(df1, expected_df) + + +def test_replace_list(): + obj = {"a": list("ab.."), "b": list("efgh"), "c": list("helo")} + dfobj = DataFrame(obj) + + # lists of regexes and values + # list of [v1, v2, ..., vN] -> [v1, v2, ..., vN] + to_replace_res = [r".", r"e"] + values = [np.nan, "crap"] + res = dfobj.replace(to_replace_res, values) + expec = DataFrame( + { + "a": ["a", "b", np.nan, np.nan], + "b": ["crap", "f", "g", "h"], + "c": ["h", "crap", "l", "o"], + } + ) + tm.assert_frame_equal(res, expec) + + # list of [v1, v2, ..., vN] -> [v1, v2, .., vN] + to_replace_res = [r".", r"f"] + values = [r"..", r"crap"] + res = dfobj.replace(to_replace_res, values) + expec = DataFrame( + { + "a": ["a", "b", "..", ".."], + "b": ["e", "crap", "g", "h"], + "c": ["h", "e", "l", "o"], + } + ) + tm.assert_frame_equal(res, expec) + + +def test_replace_with_empty_list(frame_or_series): + # GH 21977 + ser = Series([["a", "b"], [], np.nan, [1]]) + obj = DataFrame({"col": ser}) + if frame_or_series is Series: + obj = ser + expected = obj + result = obj.replace([], np.nan) + tm.assert_equal(result, expected) + + # GH 19266 + msg = ( + "NumPy boolean array indexing assignment cannot assign {size} " + "input values to the 1 output values where the mask is true" + ) + with pytest.raises(ValueError, match=msg.format(size=0)): + obj.replace({np.nan: []}) + with pytest.raises(ValueError, match=msg.format(size=2)): + obj.replace({np.nan: ["dummy", "alt"]}) + + +def test_replace_series_dict(): + # from GH 
3064 + df = DataFrame({"zero": {"a": 0.0, "b": 1}, "one": {"a": 2.0, "b": 0}}) + result = df.replace(0, {"zero": 0.5, "one": 1.0}) + expected = DataFrame({"zero": {"a": 0.5, "b": 1}, "one": {"a": 2.0, "b": 1.0}}) + tm.assert_frame_equal(result, expected) + + result = df.replace(0, df.mean()) + tm.assert_frame_equal(result, expected) + + # series to series/dict + df = DataFrame({"zero": {"a": 0.0, "b": 1}, "one": {"a": 2.0, "b": 0}}) + s = Series({"zero": 0.0, "one": 2.0}) + result = df.replace(s, {"zero": 0.5, "one": 1.0}) + expected = DataFrame({"zero": {"a": 0.5, "b": 1}, "one": {"a": 1.0, "b": 0.0}}) + tm.assert_frame_equal(result, expected) + + result = df.replace(s, df.mean()) + tm.assert_frame_equal(result, expected) + + +def test_replace_convert(): + # gh 3907 + df = DataFrame([["foo", "bar", "bah"], ["bar", "foo", "bah"]]) + m = {"foo": 1, "bar": 2, "bah": 3} + rep = df.replace(m) + expec = Series([np.int64] * 3) + res = rep.dtypes + tm.assert_series_equal(expec, res) + + +def test_replace_mixed(float_string_frame): + mf = float_string_frame + mf.iloc[5:20, mf.columns.get_loc("foo")] = np.nan + mf.iloc[-10:, mf.columns.get_loc("A")] = np.nan + + result = float_string_frame.replace(np.nan, -18) + expected = float_string_frame.fillna(value=-18) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result.replace(-18, np.nan), float_string_frame) + + result = float_string_frame.replace(np.nan, -1e8) + expected = float_string_frame.fillna(value=-1e8) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result.replace(-1e8, np.nan), float_string_frame) + + +def test_replace_mixed_int_block_upcasting(): + + # int block upcasting + df = DataFrame( + { + "A": Series([1.0, 2.0], dtype="float64"), + "B": Series([0, 1], dtype="int64"), + } + ) + expected = DataFrame( + { + "A": Series([1.0, 2.0], dtype="float64"), + "B": Series([0.5, 1], dtype="float64"), + } + ) + result = df.replace(0, 0.5) + tm.assert_frame_equal(result, expected) - 
to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"] - values = [r"\1\1", r"crap", r"\1_crap"] - res = dfmix.copy() - return_value = res.replace(regex=to_replace_res, value=values, inplace=True) - assert return_value is None - expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]}) - tm.assert_frame_equal(res, expec) - - def test_regex_replace_dict_mixed(self, mix_abc): - dfmix = DataFrame(mix_abc) - - # dicts - # single dict {re1: v1}, search the whole frame - # need test for this... - - # list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole - # frame - res = dfmix.replace({"b": r"\s*\.\s*"}, {"b": np.nan}, regex=True) - res2 = dfmix.copy() - return_value = res2.replace( - {"b": r"\s*\.\s*"}, {"b": np.nan}, inplace=True, regex=True - ) - assert return_value is None - expec = DataFrame( - {"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]} - ) - tm.assert_frame_equal(res, expec) - tm.assert_frame_equal(res2, expec) - - # list of dicts {re1: re11, re2: re12, ..., reN: re1N}, search the - # whole frame - res = dfmix.replace({"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, regex=True) - res2 = dfmix.copy() - return_value = res2.replace( - {"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, inplace=True, regex=True - ) - assert return_value is None - expec = DataFrame( - {"a": mix_abc["a"], "b": ["a", "b", ".ty", ".ty"], "c": mix_abc["c"]} - ) - tm.assert_frame_equal(res, expec) - tm.assert_frame_equal(res2, expec) + return_value = df.replace(0, 0.5, inplace=True) + assert return_value is None + tm.assert_frame_equal(df, expected) - res = dfmix.replace(regex={"b": r"\s*(\.)\s*"}, value={"b": r"\1ty"}) - res2 = dfmix.copy() - return_value = res2.replace( - regex={"b": r"\s*(\.)\s*"}, value={"b": r"\1ty"}, inplace=True - ) - assert return_value is None - expec = DataFrame( - {"a": mix_abc["a"], "b": ["a", "b", ".ty", ".ty"], "c": mix_abc["c"]} - ) - tm.assert_frame_equal(res, expec) - tm.assert_frame_equal(res2, expec) - # scalar -> dict - # 
to_replace regex, {value: value} - expec = DataFrame( - {"a": mix_abc["a"], "b": [np.nan, "b", ".", "."], "c": mix_abc["c"]} - ) - res = dfmix.replace("a", {"b": np.nan}, regex=True) - res2 = dfmix.copy() - return_value = res2.replace("a", {"b": np.nan}, regex=True, inplace=True) - assert return_value is None - tm.assert_frame_equal(res, expec) - tm.assert_frame_equal(res2, expec) +def test_replace_mixed_int_block_splitting(): - res = dfmix.replace("a", {"b": np.nan}, regex=True) - res2 = dfmix.copy() - return_value = res2.replace(regex="a", value={"b": np.nan}, inplace=True) - assert return_value is None - expec = DataFrame( - {"a": mix_abc["a"], "b": [np.nan, "b", ".", "."], "c": mix_abc["c"]} - ) - tm.assert_frame_equal(res, expec) - tm.assert_frame_equal(res2, expec) - - def test_regex_replace_dict_nested(self, mix_abc): - # nested dicts will not work until this is implemented for Series - dfmix = DataFrame(mix_abc) - res = dfmix.replace({"b": {r"\s*\.\s*": np.nan}}, regex=True) - res2 = dfmix.copy() - res4 = dfmix.copy() - return_value = res2.replace( - {"b": {r"\s*\.\s*": np.nan}}, inplace=True, regex=True - ) - assert return_value is None - res3 = dfmix.replace(regex={"b": {r"\s*\.\s*": np.nan}}) - return_value = res4.replace(regex={"b": {r"\s*\.\s*": np.nan}}, inplace=True) - assert return_value is None - expec = DataFrame( - {"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]} - ) - tm.assert_frame_equal(res, expec) - tm.assert_frame_equal(res2, expec) - tm.assert_frame_equal(res3, expec) - tm.assert_frame_equal(res4, expec) - - def test_regex_replace_dict_nested_non_first_character(self): - # GH 25259 - df = DataFrame({"first": ["abc", "bca", "cab"]}) - expected = DataFrame({"first": [".bc", "bc.", "c.b"]}) - result = df.replace({"a": "."}, regex=True) - tm.assert_frame_equal(result, expected) - - def test_regex_replace_dict_nested_gh4115(self): - df = DataFrame({"Type": ["Q", "T", "Q", "Q", "T"], "tmp": 2}) - expected = 
DataFrame({"Type": [0, 1, 0, 0, 1], "tmp": 2}) - result = df.replace({"Type": {"Q": 0, "T": 1}}) - tm.assert_frame_equal(result, expected) - - def test_regex_replace_list_to_scalar(self, mix_abc): - df = DataFrame(mix_abc) - expec = DataFrame( - { - "a": mix_abc["a"], - "b": np.array([np.nan] * 4), - "c": [np.nan, np.nan, np.nan, "d"], - } - ) - res = df.replace([r"\s*\.\s*", "a|b"], np.nan, regex=True) - res2 = df.copy() - res3 = df.copy() - return_value = res2.replace( - [r"\s*\.\s*", "a|b"], np.nan, regex=True, inplace=True - ) - assert return_value is None - return_value = res3.replace( - regex=[r"\s*\.\s*", "a|b"], value=np.nan, inplace=True - ) - assert return_value is None - tm.assert_frame_equal(res, expec) - tm.assert_frame_equal(res2, expec) - tm.assert_frame_equal(res3, expec) - - def test_regex_replace_str_to_numeric(self, mix_abc): - # what happens when you try to replace a numeric value with a regex? - df = DataFrame(mix_abc) - res = df.replace(r"\s*\.\s*", 0, regex=True) - res2 = df.copy() - return_value = res2.replace(r"\s*\.\s*", 0, inplace=True, regex=True) - assert return_value is None - res3 = df.copy() - return_value = res3.replace(regex=r"\s*\.\s*", value=0, inplace=True) - assert return_value is None - expec = DataFrame({"a": mix_abc["a"], "b": ["a", "b", 0, 0], "c": mix_abc["c"]}) - tm.assert_frame_equal(res, expec) - tm.assert_frame_equal(res2, expec) - tm.assert_frame_equal(res3, expec) - - def test_regex_replace_regex_list_to_numeric(self, mix_abc): - df = DataFrame(mix_abc) - res = df.replace([r"\s*\.\s*", "b"], 0, regex=True) - res2 = df.copy() - return_value = res2.replace([r"\s*\.\s*", "b"], 0, regex=True, inplace=True) - assert return_value is None - res3 = df.copy() - return_value = res3.replace(regex=[r"\s*\.\s*", "b"], value=0, inplace=True) - assert return_value is None - expec = DataFrame( - {"a": mix_abc["a"], "b": ["a", 0, 0, 0], "c": ["a", 0, np.nan, "d"]} - ) - tm.assert_frame_equal(res, expec) - tm.assert_frame_equal(res2, 
expec) - tm.assert_frame_equal(res3, expec) - - def test_regex_replace_series_of_regexes(self, mix_abc): - df = DataFrame(mix_abc) - s1 = Series({"b": r"\s*\.\s*"}) - s2 = Series({"b": np.nan}) - res = df.replace(s1, s2, regex=True) - res2 = df.copy() - return_value = res2.replace(s1, s2, inplace=True, regex=True) - assert return_value is None - res3 = df.copy() - return_value = res3.replace(regex=s1, value=s2, inplace=True) - assert return_value is None - expec = DataFrame( - {"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]} - ) - tm.assert_frame_equal(res, expec) - tm.assert_frame_equal(res2, expec) - tm.assert_frame_equal(res3, expec) - - def test_regex_replace_numeric_to_object_conversion(self, mix_abc): - df = DataFrame(mix_abc) - expec = DataFrame({"a": ["a", 1, 2, 3], "b": mix_abc["b"], "c": mix_abc["c"]}) - res = df.replace(0, "a") - tm.assert_frame_equal(res, expec) - assert res.a.dtype == np.object_ - - @pytest.mark.parametrize( - "to_replace", [{"": np.nan, ",": ""}, {",": "", "": np.nan}] - ) - def test_joint_simple_replace_and_regex_replace(self, to_replace): - # GH-39338 - df = DataFrame( - { - "col1": ["1,000", "a", "3"], - "col2": ["a", "", "b"], - "col3": ["a", "b", "c"], - } - ) - result = df.replace(regex=to_replace) - expected = DataFrame( - { - "col1": ["1000", "a", "3"], - "col2": ["a", np.nan, "b"], - "col3": ["a", "b", "c"], - } - ) - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize("metachar", ["[]", "()", r"\d", r"\w", r"\s"]) - def test_replace_regex_metachar(self, metachar): - df = DataFrame({"a": [metachar, "else"]}) - result = df.replace({"a": {metachar: "paren"}}) - expected = DataFrame({"a": ["paren", "else"]}) - tm.assert_frame_equal(result, expected) - - def test_replace(self, datetime_frame): - datetime_frame["A"][:5] = np.nan - datetime_frame["A"][-5:] = np.nan - - zero_filled = datetime_frame.replace(np.nan, -1e8) - tm.assert_frame_equal(zero_filled, datetime_frame.fillna(-1e8)) - 
tm.assert_frame_equal(zero_filled.replace(-1e8, np.nan), datetime_frame) - - datetime_frame["A"][:5] = np.nan - datetime_frame["A"][-5:] = np.nan - datetime_frame["B"][:5] = -1e8 - - # empty - df = DataFrame(index=["a", "b"]) - tm.assert_frame_equal(df, df.replace(5, 7)) - - # GH 11698 - # test for mixed data types. - df = DataFrame( - [("-", pd.to_datetime("20150101")), ("a", pd.to_datetime("20150102"))] - ) - df1 = df.replace("-", np.nan) - expected_df = DataFrame( - [(np.nan, pd.to_datetime("20150101")), ("a", pd.to_datetime("20150102"))] - ) - tm.assert_frame_equal(df1, expected_df) - - def test_replace_list(self): - obj = {"a": list("ab.."), "b": list("efgh"), "c": list("helo")} - dfobj = DataFrame(obj) - - # lists of regexes and values - # list of [v1, v2, ..., vN] -> [v1, v2, ..., vN] - to_replace_res = [r".", r"e"] - values = [np.nan, "crap"] - res = dfobj.replace(to_replace_res, values) - expec = DataFrame( - { - "a": ["a", "b", np.nan, np.nan], - "b": ["crap", "f", "g", "h"], - "c": ["h", "crap", "l", "o"], - } - ) - tm.assert_frame_equal(res, expec) - - # list of [v1, v2, ..., vN] -> [v1, v2, .., vN] - to_replace_res = [r".", r"f"] - values = [r"..", r"crap"] - res = dfobj.replace(to_replace_res, values) - expec = DataFrame( - { - "a": ["a", "b", "..", ".."], - "b": ["e", "crap", "g", "h"], - "c": ["h", "e", "l", "o"], - } - ) - tm.assert_frame_equal(res, expec) - - def test_replace_with_empty_list(self, frame_or_series): - # GH 21977 - ser = Series([["a", "b"], [], np.nan, [1]]) - obj = DataFrame({"col": ser}) - if frame_or_series is Series: - obj = ser - expected = obj - result = obj.replace([], np.nan) - tm.assert_equal(result, expected) - - # GH 19266 - msg = ( - "NumPy boolean array indexing assignment cannot assign {size} " - "input values to the 1 output values where the mask is true" - ) - with pytest.raises(ValueError, match=msg.format(size=0)): - obj.replace({np.nan: []}) - with pytest.raises(ValueError, match=msg.format(size=2)): - 
obj.replace({np.nan: ["dummy", "alt"]}) - - def test_replace_series_dict(self): - # from GH 3064 - df = DataFrame({"zero": {"a": 0.0, "b": 1}, "one": {"a": 2.0, "b": 0}}) - result = df.replace(0, {"zero": 0.5, "one": 1.0}) - expected = DataFrame({"zero": {"a": 0.5, "b": 1}, "one": {"a": 2.0, "b": 1.0}}) - tm.assert_frame_equal(result, expected) - - result = df.replace(0, df.mean()) - tm.assert_frame_equal(result, expected) - - # series to series/dict - df = DataFrame({"zero": {"a": 0.0, "b": 1}, "one": {"a": 2.0, "b": 0}}) - s = Series({"zero": 0.0, "one": 2.0}) - result = df.replace(s, {"zero": 0.5, "one": 1.0}) - expected = DataFrame({"zero": {"a": 0.5, "b": 1}, "one": {"a": 1.0, "b": 0.0}}) - tm.assert_frame_equal(result, expected) - - result = df.replace(s, df.mean()) - tm.assert_frame_equal(result, expected) - - def test_replace_convert(self): - # gh 3907 - df = DataFrame([["foo", "bar", "bah"], ["bar", "foo", "bah"]]) - m = {"foo": 1, "bar": 2, "bah": 3} - rep = df.replace(m) - expec = Series([np.int64] * 3) - res = rep.dtypes - tm.assert_series_equal(expec, res) - - def test_replace_mixed(self, float_string_frame): - mf = float_string_frame - mf.iloc[5:20, mf.columns.get_loc("foo")] = np.nan - mf.iloc[-10:, mf.columns.get_loc("A")] = np.nan - - result = float_string_frame.replace(np.nan, -18) - expected = float_string_frame.fillna(value=-18) - tm.assert_frame_equal(result, expected) - tm.assert_frame_equal(result.replace(-18, np.nan), float_string_frame) - - result = float_string_frame.replace(np.nan, -1e8) - expected = float_string_frame.fillna(value=-1e8) - tm.assert_frame_equal(result, expected) - tm.assert_frame_equal(result.replace(-1e8, np.nan), float_string_frame) - - def test_replace_mixed_int_block_upcasting(self): - - # int block upcasting - df = DataFrame( - { - "A": Series([1.0, 2.0], dtype="float64"), - "B": Series([0, 1], dtype="int64"), - } - ) - expected = DataFrame( - { - "A": Series([1.0, 2.0], dtype="float64"), - "B": Series([0.5, 1], 
dtype="float64"), - } - ) - result = df.replace(0, 0.5) - tm.assert_frame_equal(result, expected) + # int block splitting + df = DataFrame( + { + "A": Series([1.0, 2.0], dtype="float64"), + "B": Series([0, 1], dtype="int64"), + "C": Series([1, 2], dtype="int64"), + } + ) + expected = DataFrame( + { + "A": Series([1.0, 2.0], dtype="float64"), + "B": Series([0.5, 1], dtype="float64"), + "C": Series([1, 2], dtype="int64"), + } + ) + result = df.replace(0, 0.5) + tm.assert_frame_equal(result, expected) - return_value = df.replace(0, 0.5, inplace=True) - assert return_value is None - tm.assert_frame_equal(df, expected) - def test_replace_mixed_int_block_splitting(self): +def test_replace_mixed2(): - # int block splitting - df = DataFrame( - { - "A": Series([1.0, 2.0], dtype="float64"), - "B": Series([0, 1], dtype="int64"), - "C": Series([1, 2], dtype="int64"), - } - ) - expected = DataFrame( - { - "A": Series([1.0, 2.0], dtype="float64"), - "B": Series([0.5, 1], dtype="float64"), - "C": Series([1, 2], dtype="int64"), - } - ) - result = df.replace(0, 0.5) - tm.assert_frame_equal(result, expected) + # to object block upcasting + df = DataFrame( + { + "A": Series([1.0, 2.0], dtype="float64"), + "B": Series([0, 1], dtype="int64"), + } + ) + expected = DataFrame( + { + "A": Series([1, "foo"], dtype="object"), + "B": Series([0, 1], dtype="int64"), + } + ) + result = df.replace(2, "foo") + tm.assert_frame_equal(result, expected) - def test_replace_mixed2(self): + expected = DataFrame( + { + "A": Series(["foo", "bar"], dtype="object"), + "B": Series([0, "foo"], dtype="object"), + } + ) + result = df.replace([1, 2], ["foo", "bar"]) + tm.assert_frame_equal(result, expected) - # to object block upcasting - df = DataFrame( - { - "A": Series([1.0, 2.0], dtype="float64"), - "B": Series([0, 1], dtype="int64"), - } - ) - expected = DataFrame( - { - "A": Series([1, "foo"], dtype="object"), - "B": Series([0, 1], dtype="int64"), - } - ) - result = df.replace(2, "foo") - 
tm.assert_frame_equal(result, expected) - - expected = DataFrame( - { - "A": Series(["foo", "bar"], dtype="object"), - "B": Series([0, "foo"], dtype="object"), - } - ) - result = df.replace([1, 2], ["foo", "bar"]) - tm.assert_frame_equal(result, expected) - def test_replace_mixed3(self): - # test case from - df = DataFrame( - {"A": Series([3, 0], dtype="int64"), "B": Series([0, 3], dtype="int64")} - ) - result = df.replace(3, df.mean().to_dict()) - expected = df.copy().astype("float64") - m = df.mean() - expected.iloc[0, 0] = m[0] - expected.iloc[1, 1] = m[1] - tm.assert_frame_equal(result, expected) - - def test_replace_simple_nested_dict(self): - df = DataFrame({"col": range(1, 5)}) - expected = DataFrame({"col": ["a", 2, 3, "b"]}) - - result = df.replace({"col": {1: "a", 4: "b"}}) - tm.assert_frame_equal(expected, result) - - # in this case, should be the same as the not nested version - result = df.replace({1: "a", 4: "b"}) - tm.assert_frame_equal(expected, result) - - def test_replace_simple_nested_dict_with_nonexistent_value(self): - df = DataFrame({"col": range(1, 5)}) - expected = DataFrame({"col": ["a", 2, 3, "b"]}) - - result = df.replace({-1: "-", 1: "a", 4: "b"}) - tm.assert_frame_equal(expected, result) - - result = df.replace({"col": {-1: "-", 1: "a", 4: "b"}}) - tm.assert_frame_equal(expected, result) - - def test_replace_value_is_none(self, datetime_frame): - orig_value = datetime_frame.iloc[0, 0] - orig2 = datetime_frame.iloc[1, 0] - - datetime_frame.iloc[0, 0] = np.nan - datetime_frame.iloc[1, 0] = 1 - - result = datetime_frame.replace(to_replace={np.nan: 0}) - expected = datetime_frame.T.replace(to_replace={np.nan: 0}).T - tm.assert_frame_equal(result, expected) - - result = datetime_frame.replace(to_replace={np.nan: 0, 1: -1e8}) - tsframe = datetime_frame.copy() - tsframe.iloc[0, 0] = 0 - tsframe.iloc[1, 0] = -1e8 - expected = tsframe - tm.assert_frame_equal(expected, result) - datetime_frame.iloc[0, 0] = orig_value - datetime_frame.iloc[1, 0] = 
orig2 - - def test_replace_for_new_dtypes(self, datetime_frame): - - # dtypes - tsframe = datetime_frame.copy().astype(np.float32) - tsframe["A"][:5] = np.nan - tsframe["A"][-5:] = np.nan - - zero_filled = tsframe.replace(np.nan, -1e8) - tm.assert_frame_equal(zero_filled, tsframe.fillna(-1e8)) - tm.assert_frame_equal(zero_filled.replace(-1e8, np.nan), tsframe) - - tsframe["A"][:5] = np.nan - tsframe["A"][-5:] = np.nan - tsframe["B"][:5] = -1e8 - - b = tsframe["B"] - b[b == -1e8] = np.nan - tsframe["B"] = b - result = tsframe.fillna(method="bfill") - tm.assert_frame_equal(result, tsframe.fillna(method="bfill")) - - @pytest.mark.parametrize( - "frame, to_replace, value, expected", - [ - (DataFrame({"ints": [1, 2, 3]}), 1, 0, DataFrame({"ints": [0, 2, 3]})), - ( - DataFrame({"ints": [1, 2, 3]}, dtype=np.int32), - 1, - 0, - DataFrame({"ints": [0, 2, 3]}, dtype=np.int32), - ), - ( - DataFrame({"ints": [1, 2, 3]}, dtype=np.int16), - 1, - 0, - DataFrame({"ints": [0, 2, 3]}, dtype=np.int16), - ), - ( - DataFrame({"bools": [True, False, True]}), - False, - True, - DataFrame({"bools": [True, True, True]}), - ), - ( - DataFrame({"complex": [1j, 2j, 3j]}), - 1j, - 0, - DataFrame({"complex": [0j, 2j, 3j]}), - ), - ( - DataFrame( - { - "datetime64": Index( - [ - datetime(2018, 5, 28), - datetime(2018, 7, 28), - datetime(2018, 5, 28), - ] - ) - } - ), - datetime(2018, 5, 28), - datetime(2018, 7, 28), - DataFrame({"datetime64": Index([datetime(2018, 7, 28)] * 3)}), - ), - # GH 20380 - ( - DataFrame({"dt": [datetime(3017, 12, 20)], "str": ["foo"]}), - "foo", - "bar", - DataFrame({"dt": [datetime(3017, 12, 20)], "str": ["bar"]}), - ), - ( - DataFrame( - { - "A": date_range("20130101", periods=3, tz="US/Eastern"), - "B": [0, np.nan, 2], - } - ), - Timestamp("20130102", tz="US/Eastern"), - Timestamp("20130104", tz="US/Eastern"), - DataFrame( - { - "A": [ - Timestamp("20130101", tz="US/Eastern"), - Timestamp("20130104", tz="US/Eastern"), - Timestamp("20130103", tz="US/Eastern"), - ], - 
"B": [0, np.nan, 2], - } - ), - ), - # GH 35376 - ( - DataFrame([[1, 1.0], [2, 2.0]]), - 1.0, - 5, - DataFrame([[5, 5.0], [2, 2.0]]), - ), - ( - DataFrame([[1, 1.0], [2, 2.0]]), - 1, - 5, - DataFrame([[5, 5.0], [2, 2.0]]), +def test_replace_mixed3(): + # test case from + df = DataFrame( + {"A": Series([3, 0], dtype="int64"), "B": Series([0, 3], dtype="int64")} + ) + result = df.replace(3, df.mean().to_dict()) + expected = df.copy().astype("float64") + m = df.mean() + expected.iloc[0, 0] = m[0] + expected.iloc[1, 1] = m[1] + tm.assert_frame_equal(result, expected) + + +def test_replace_simple_nested_dict(): + df = DataFrame({"col": range(1, 5)}) + expected = DataFrame({"col": ["a", 2, 3, "b"]}) + + result = df.replace({"col": {1: "a", 4: "b"}}) + tm.assert_frame_equal(expected, result) + + # in this case, should be the same as the not nested version + result = df.replace({1: "a", 4: "b"}) + tm.assert_frame_equal(expected, result) + + +def test_replace_simple_nested_dict_with_nonexistent_value(): + df = DataFrame({"col": range(1, 5)}) + expected = DataFrame({"col": ["a", 2, 3, "b"]}) + + result = df.replace({-1: "-", 1: "a", 4: "b"}) + tm.assert_frame_equal(expected, result) + + result = df.replace({"col": {-1: "-", 1: "a", 4: "b"}}) + tm.assert_frame_equal(expected, result) + + +def test_replace_value_is_none(datetime_frame): + orig_value = datetime_frame.iloc[0, 0] + orig2 = datetime_frame.iloc[1, 0] + + datetime_frame.iloc[0, 0] = np.nan + datetime_frame.iloc[1, 0] = 1 + + result = datetime_frame.replace(to_replace={np.nan: 0}) + expected = datetime_frame.T.replace(to_replace={np.nan: 0}).T + tm.assert_frame_equal(result, expected) + + result = datetime_frame.replace(to_replace={np.nan: 0, 1: -1e8}) + tsframe = datetime_frame.copy() + tsframe.iloc[0, 0] = 0 + tsframe.iloc[1, 0] = -1e8 + expected = tsframe + tm.assert_frame_equal(expected, result) + datetime_frame.iloc[0, 0] = orig_value + datetime_frame.iloc[1, 0] = orig2 + + +def 
test_replace_for_new_dtypes(datetime_frame): + + # dtypes + tsframe = datetime_frame.copy().astype(np.float32) + tsframe["A"][:5] = np.nan + tsframe["A"][-5:] = np.nan + + zero_filled = tsframe.replace(np.nan, -1e8) + tm.assert_frame_equal(zero_filled, tsframe.fillna(-1e8)) + tm.assert_frame_equal(zero_filled.replace(-1e8, np.nan), tsframe) + + tsframe["A"][:5] = np.nan + tsframe["A"][-5:] = np.nan + tsframe["B"][:5] = -1e8 + + b = tsframe["B"] + b[b == -1e8] = np.nan + tsframe["B"] = b + result = tsframe.fillna(method="bfill") + tm.assert_frame_equal(result, tsframe.fillna(method="bfill")) + + +@pytest.mark.parametrize( + "frame, to_replace, value, expected", + [ + (DataFrame({"ints": [1, 2, 3]}), 1, 0, DataFrame({"ints": [0, 2, 3]})), + ( + DataFrame({"ints": [1, 2, 3]}, dtype=np.int32), + 1, + 0, + DataFrame({"ints": [0, 2, 3]}, dtype=np.int32), + ), + ( + DataFrame({"ints": [1, 2, 3]}, dtype=np.int16), + 1, + 0, + DataFrame({"ints": [0, 2, 3]}, dtype=np.int16), + ), + ( + DataFrame({"bools": [True, False, True]}), + False, + True, + DataFrame({"bools": [True, True, True]}), + ), + ( + DataFrame({"complex": [1j, 2j, 3j]}), + 1j, + 0, + DataFrame({"complex": [0j, 2j, 3j]}), + ), + ( + DataFrame( + { + "datetime64": Index( + [ + datetime(2018, 5, 28), + datetime(2018, 7, 28), + datetime(2018, 5, 28), + ] + ) + } ), - ( - DataFrame([[1, 1.0], [2, 2.0]]), - 1.0, - 5.0, - DataFrame([[5, 5.0], [2, 2.0]]), + datetime(2018, 5, 28), + datetime(2018, 7, 28), + DataFrame({"datetime64": Index([datetime(2018, 7, 28)] * 3)}), + ), + # GH 20380 + ( + DataFrame({"dt": [datetime(3017, 12, 20)], "str": ["foo"]}), + "foo", + "bar", + DataFrame({"dt": [datetime(3017, 12, 20)], "str": ["bar"]}), + ), + ( + DataFrame( + { + "A": date_range("20130101", periods=3, tz="US/Eastern"), + "B": [0, np.nan, 2], + } ), - ( - DataFrame([[1, 1.0], [2, 2.0]]), - 1, - 5.0, - DataFrame([[5, 5.0], [2, 2.0]]), + Timestamp("20130102", tz="US/Eastern"), + Timestamp("20130104", tz="US/Eastern"), + 
DataFrame( + { + "A": [ + Timestamp("20130101", tz="US/Eastern"), + Timestamp("20130104", tz="US/Eastern"), + Timestamp("20130103", tz="US/Eastern"), + ], + "B": [0, np.nan, 2], + } ), - ], + ), + # GH 35376 + ( + DataFrame([[1, 1.0], [2, 2.0]]), + 1.0, + 5, + DataFrame([[5, 5.0], [2, 2.0]]), + ), + ( + DataFrame([[1, 1.0], [2, 2.0]]), + 1, + 5, + DataFrame([[5, 5.0], [2, 2.0]]), + ), + ( + DataFrame([[1, 1.0], [2, 2.0]]), + 1.0, + 5.0, + DataFrame([[5, 5.0], [2, 2.0]]), + ), + ( + DataFrame([[1, 1.0], [2, 2.0]]), + 1, + 5.0, + DataFrame([[5, 5.0], [2, 2.0]]), + ), + ], +) +def test_replace_dtypes(frame, to_replace, value, expected): + result = getattr(frame, "replace")(to_replace, value) + tm.assert_frame_equal(result, expected) + + +def test_replace_input_formats_listlike(): + # both dicts + to_rep = {"A": np.nan, "B": 0, "C": ""} + values = {"A": 0, "B": -1, "C": "missing"} + df = DataFrame({"A": [np.nan, 0, np.inf], "B": [0, 2, 5], "C": ["", "asdf", "fd"]}) + filled = df.replace(to_rep, values) + expected = {k: v.replace(to_rep[k], values[k]) for k, v in df.items()} + tm.assert_frame_equal(filled, DataFrame(expected)) + + result = df.replace([0, 2, 5], [5, 2, 0]) + expected = DataFrame( + {"A": [np.nan, 5, np.inf], "B": [5, 2, 0], "C": ["", "asdf", "fd"]} ) - def test_replace_dtypes(self, frame, to_replace, value, expected): - result = getattr(frame, "replace")(to_replace, value) - tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result, expected) + + # scalar to dict + values = {"A": 0, "B": -1, "C": "missing"} + df = DataFrame({"A": [np.nan, 0, np.nan], "B": [0, 2, 5], "C": ["", "asdf", "fd"]}) + filled = df.replace(np.nan, values) + expected = {k: v.replace(np.nan, values[k]) for k, v in df.items()} + tm.assert_frame_equal(filled, DataFrame(expected)) + + # list to list + to_rep = [np.nan, 0, ""] + values = [-2, -1, "missing"] + result = df.replace(to_rep, values) + expected = df.copy() + for i in range(len(to_rep)): + return_value = 
expected.replace(to_rep[i], values[i], inplace=True) + assert return_value is None + tm.assert_frame_equal(result, expected) - def test_replace_input_formats_listlike(self): - # both dicts - to_rep = {"A": np.nan, "B": 0, "C": ""} - values = {"A": 0, "B": -1, "C": "missing"} - df = DataFrame( - {"A": [np.nan, 0, np.inf], "B": [0, 2, 5], "C": ["", "asdf", "fd"]} - ) - filled = df.replace(to_rep, values) - expected = {k: v.replace(to_rep[k], values[k]) for k, v in df.items()} - tm.assert_frame_equal(filled, DataFrame(expected)) + msg = r"Replacement lists must match in length\. Expecting 3 got 2" + with pytest.raises(ValueError, match=msg): + df.replace(to_rep, values[1:]) - result = df.replace([0, 2, 5], [5, 2, 0]) - expected = DataFrame( - {"A": [np.nan, 5, np.inf], "B": [5, 2, 0], "C": ["", "asdf", "fd"]} - ) - tm.assert_frame_equal(result, expected) - # scalar to dict - values = {"A": 0, "B": -1, "C": "missing"} - df = DataFrame( - {"A": [np.nan, 0, np.nan], "B": [0, 2, 5], "C": ["", "asdf", "fd"]} - ) - filled = df.replace(np.nan, values) - expected = {k: v.replace(np.nan, values[k]) for k, v in df.items()} - tm.assert_frame_equal(filled, DataFrame(expected)) - - # list to list - to_rep = [np.nan, 0, ""] - values = [-2, -1, "missing"] - result = df.replace(to_rep, values) - expected = df.copy() - for i in range(len(to_rep)): - return_value = expected.replace(to_rep[i], values[i], inplace=True) - assert return_value is None - tm.assert_frame_equal(result, expected) - - msg = r"Replacement lists must match in length\. 
Expecting 3 got 2" - with pytest.raises(ValueError, match=msg): - df.replace(to_rep, values[1:]) - - def test_replace_input_formats_scalar(self): - df = DataFrame( - {"A": [np.nan, 0, np.inf], "B": [0, 2, 5], "C": ["", "asdf", "fd"]} - ) +def test_replace_input_formats_scalar(): + df = DataFrame({"A": [np.nan, 0, np.inf], "B": [0, 2, 5], "C": ["", "asdf", "fd"]}) - # dict to scalar - to_rep = {"A": np.nan, "B": 0, "C": ""} - filled = df.replace(to_rep, 0) - expected = {k: v.replace(to_rep[k], 0) for k, v in df.items()} - tm.assert_frame_equal(filled, DataFrame(expected)) - - msg = "value argument must be scalar, dict, or Series" - with pytest.raises(TypeError, match=msg): - df.replace(to_rep, [np.nan, 0, ""]) - - # list to scalar - to_rep = [np.nan, 0, ""] - result = df.replace(to_rep, -1) - expected = df.copy() - for i in range(len(to_rep)): - return_value = expected.replace(to_rep[i], -1, inplace=True) - assert return_value is None - tm.assert_frame_equal(result, expected) - - def test_replace_limit(self): - pass - - def test_replace_dict_no_regex(self): - answer = Series( - { - 0: "Strongly Agree", - 1: "Agree", - 2: "Neutral", - 3: "Disagree", - 4: "Strongly Disagree", - } - ) - weights = { + # dict to scalar + to_rep = {"A": np.nan, "B": 0, "C": ""} + filled = df.replace(to_rep, 0) + expected = {k: v.replace(to_rep[k], 0) for k, v in df.items()} + tm.assert_frame_equal(filled, DataFrame(expected)) + + msg = "value argument must be scalar, dict, or Series" + with pytest.raises(TypeError, match=msg): + df.replace(to_rep, [np.nan, 0, ""]) + + # list to scalar + to_rep = [np.nan, 0, ""] + result = df.replace(to_rep, -1) + expected = df.copy() + for i in range(len(to_rep)): + return_value = expected.replace(to_rep[i], -1, inplace=True) + assert return_value is None + tm.assert_frame_equal(result, expected) + + +def test_replace_limit(): + pass + + +def test_replace_dict_no_regex(): + answer = Series( + { + 0: "Strongly Agree", + 1: "Agree", + 2: "Neutral", + 3: 
"Disagree", + 4: "Strongly Disagree", + } + ) + weights = { + "Agree": 4, + "Disagree": 2, + "Neutral": 3, + "Strongly Agree": 5, + "Strongly Disagree": 1, + } + expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1}) + result = answer.replace(weights) + tm.assert_series_equal(result, expected) + + +def test_replace_series_no_regex(): + answer = Series( + { + 0: "Strongly Agree", + 1: "Agree", + 2: "Neutral", + 3: "Disagree", + 4: "Strongly Disagree", + } + ) + weights = Series( + { "Agree": 4, "Disagree": 2, "Neutral": 3, "Strongly Agree": 5, "Strongly Disagree": 1, } - expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1}) - result = answer.replace(weights) - tm.assert_series_equal(result, expected) - - def test_replace_series_no_regex(self): - answer = Series( - { - 0: "Strongly Agree", - 1: "Agree", - 2: "Neutral", - 3: "Disagree", - 4: "Strongly Disagree", - } - ) - weights = Series( - { - "Agree": 4, - "Disagree": 2, - "Neutral": 3, - "Strongly Agree": 5, - "Strongly Disagree": 1, - } - ) - expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1}) - result = answer.replace(weights) - tm.assert_series_equal(result, expected) - - def test_replace_dict_tuple_list_ordering_remains_the_same(self): - df = DataFrame({"A": [np.nan, 1]}) - res1 = df.replace(to_replace={np.nan: 0, 1: -1e8}) - res2 = df.replace(to_replace=(1, np.nan), value=[-1e8, 0]) - res3 = df.replace(to_replace=[1, np.nan], value=[-1e8, 0]) - - expected = DataFrame({"A": [0, -1e8]}) - tm.assert_frame_equal(res1, res2) - tm.assert_frame_equal(res2, res3) - tm.assert_frame_equal(res3, expected) - - def test_replace_doesnt_replace_without_regex(self): - raw = """fol T_opp T_Dir T_Enh - 0 1 0 0 vo - 1 2 vr 0 0 - 2 2 0 0 0 - 3 3 0 bt 0""" - df = pd.read_csv(StringIO(raw), sep=r"\s+") - res = df.replace({r"\D": 1}) - tm.assert_frame_equal(df, res) - - def test_replace_bool_with_string(self): - df = DataFrame({"a": [True, False], "b": list("ab")}) - result = df.replace(True, "a") - expected = DataFrame({"a": ["a", False], 
"b": df.b}) - tm.assert_frame_equal(result, expected) - - def test_replace_pure_bool_with_string_no_op(self): - df = DataFrame(np.random.rand(2, 2) > 0.5) - result = df.replace("asdf", "fdsa") - tm.assert_frame_equal(df, result) - - def test_replace_bool_with_bool(self): - df = DataFrame(np.random.rand(2, 2) > 0.5) - result = df.replace(False, True) - expected = DataFrame(np.ones((2, 2), dtype=bool)) - tm.assert_frame_equal(result, expected) - - def test_replace_with_dict_with_bool_keys(self): - df = DataFrame({0: [True, False], 1: [False, True]}) - result = df.replace({"asdf": "asdb", True: "yes"}) - expected = DataFrame({0: ["yes", False], 1: [False, "yes"]}) - tm.assert_frame_equal(result, expected) - - def test_replace_dict_strings_vs_ints(self): - # GH#34789 - df = DataFrame({"Y0": [1, 2], "Y1": [3, 4]}) - result = df.replace({"replace_string": "test"}) - - tm.assert_frame_equal(result, df) - - result = df["Y0"].replace({"replace_string": "test"}) - tm.assert_series_equal(result, df["Y0"]) - - def test_replace_truthy(self): - df = DataFrame({"a": [True, True]}) - r = df.replace([np.inf, -np.inf], np.nan) - e = df - tm.assert_frame_equal(r, e) - - def test_nested_dict_overlapping_keys_replace_int(self): - # GH 27660 keep behaviour consistent for simple dictionary and - # nested dictionary replacement - df = DataFrame({"a": list(range(1, 5))}) - - result = df.replace({"a": dict(zip(range(1, 5), range(2, 6)))}) - expected = df.replace(dict(zip(range(1, 5), range(2, 6)))) - tm.assert_frame_equal(result, expected) - - def test_nested_dict_overlapping_keys_replace_str(self): - # GH 27660 - a = np.arange(1, 5) - astr = a.astype(str) - bstr = np.arange(2, 6).astype(str) - df = DataFrame({"a": astr}) - result = df.replace(dict(zip(astr, bstr))) - expected = df.replace({"a": dict(zip(astr, bstr))}) - tm.assert_frame_equal(result, expected) - - def test_replace_swapping_bug(self): - df = DataFrame({"a": [True, False, True]}) - res = df.replace({"a": {True: "Y", False: 
"N"}}) - expect = DataFrame({"a": ["Y", "N", "Y"]}) - tm.assert_frame_equal(res, expect) - - df = DataFrame({"a": [0, 1, 0]}) - res = df.replace({"a": {0: "Y", 1: "N"}}) - expect = DataFrame({"a": ["Y", "N", "Y"]}) - tm.assert_frame_equal(res, expect) - - def test_replace_period(self): - d = { - "fname": { - "out_augmented_AUG_2011.json": pd.Period(year=2011, month=8, freq="M"), - "out_augmented_JAN_2011.json": pd.Period(year=2011, month=1, freq="M"), - "out_augmented_MAY_2012.json": pd.Period(year=2012, month=5, freq="M"), - "out_augmented_SUBSIDY_WEEK.json": pd.Period( - year=2011, month=4, freq="M" - ), - "out_augmented_AUG_2012.json": pd.Period(year=2012, month=8, freq="M"), - "out_augmented_MAY_2011.json": pd.Period(year=2011, month=5, freq="M"), - "out_augmented_SEP_2013.json": pd.Period(year=2013, month=9, freq="M"), - } - } + ) + expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1}) + result = answer.replace(weights) + tm.assert_series_equal(result, expected) - df = DataFrame( - [ - "out_augmented_AUG_2012.json", - "out_augmented_SEP_2013.json", - "out_augmented_SUBSIDY_WEEK.json", - "out_augmented_MAY_2012.json", - "out_augmented_MAY_2011.json", - "out_augmented_AUG_2011.json", - "out_augmented_JAN_2011.json", - ], - columns=["fname"], - ) - assert set(df.fname.values) == set(d["fname"].keys()) - # We don't support converting object -> specialized EA in - # replace yet. 
- expected = DataFrame( - {"fname": [d["fname"][k] for k in df.fname.values]}, dtype=object - ) - result = df.replace(d) - tm.assert_frame_equal(result, expected) - - def test_replace_datetime(self): - d = { - "fname": { - "out_augmented_AUG_2011.json": Timestamp("2011-08"), - "out_augmented_JAN_2011.json": Timestamp("2011-01"), - "out_augmented_MAY_2012.json": Timestamp("2012-05"), - "out_augmented_SUBSIDY_WEEK.json": Timestamp("2011-04"), - "out_augmented_AUG_2012.json": Timestamp("2012-08"), - "out_augmented_MAY_2011.json": Timestamp("2011-05"), - "out_augmented_SEP_2013.json": Timestamp("2013-09"), - } - } - df = DataFrame( - [ - "out_augmented_AUG_2012.json", - "out_augmented_SEP_2013.json", - "out_augmented_SUBSIDY_WEEK.json", - "out_augmented_MAY_2012.json", - "out_augmented_MAY_2011.json", - "out_augmented_AUG_2011.json", - "out_augmented_JAN_2011.json", - ], - columns=["fname"], - ) - assert set(df.fname.values) == set(d["fname"].keys()) - expected = DataFrame({"fname": [d["fname"][k] for k in df.fname.values]}) - result = df.replace(d) - tm.assert_frame_equal(result, expected) - - def test_replace_datetimetz(self): - - # GH 11326 - # behaving poorly when presented with a datetime64[ns, tz] - df = DataFrame( - { - "A": date_range("20130101", periods=3, tz="US/Eastern"), - "B": [0, np.nan, 2], - } - ) - result = df.replace(np.nan, 1) - expected = DataFrame( - { - "A": date_range("20130101", periods=3, tz="US/Eastern"), - "B": Series([0, 1, 2], dtype="float64"), - } - ) - tm.assert_frame_equal(result, expected) +def test_replace_dict_tuple_list_ordering_remains_the_same(): + df = DataFrame({"A": [np.nan, 1]}) + res1 = df.replace(to_replace={np.nan: 0, 1: -1e8}) + res2 = df.replace(to_replace=(1, np.nan), value=[-1e8, 0]) + res3 = df.replace(to_replace=[1, np.nan], value=[-1e8, 0]) - result = df.fillna(1) - tm.assert_frame_equal(result, expected) + expected = DataFrame({"A": [0, -1e8]}) + tm.assert_frame_equal(res1, res2) + tm.assert_frame_equal(res2, res3) + 
tm.assert_frame_equal(res3, expected) - result = df.replace(0, np.nan) - expected = DataFrame( - { - "A": date_range("20130101", periods=3, tz="US/Eastern"), - "B": [np.nan, np.nan, 2], - } - ) - tm.assert_frame_equal(result, expected) - result = df.replace( - Timestamp("20130102", tz="US/Eastern"), - Timestamp("20130104", tz="US/Eastern"), - ) - expected = DataFrame( - { - "A": [ - Timestamp("20130101", tz="US/Eastern"), - Timestamp("20130104", tz="US/Eastern"), - Timestamp("20130103", tz="US/Eastern"), - ], - "B": [0, np.nan, 2], - } - ) - tm.assert_frame_equal(result, expected) - - result = df.copy() - result.iloc[1, 0] = np.nan - result = result.replace({"A": pd.NaT}, Timestamp("20130104", tz="US/Eastern")) - tm.assert_frame_equal(result, expected) - - # coerce to object - result = df.copy() - result.iloc[1, 0] = np.nan - result = result.replace({"A": pd.NaT}, Timestamp("20130104", tz="US/Pacific")) - expected = DataFrame( - { - "A": [ - Timestamp("20130101", tz="US/Eastern"), - Timestamp("20130104", tz="US/Pacific"), - Timestamp("20130103", tz="US/Eastern"), - ], - "B": [0, np.nan, 2], - } - ) - tm.assert_frame_equal(result, expected) - - result = df.copy() - result.iloc[1, 0] = np.nan - result = result.replace({"A": np.nan}, Timestamp("20130104")) - expected = DataFrame( - { - "A": [ - Timestamp("20130101", tz="US/Eastern"), - Timestamp("20130104"), - Timestamp("20130103", tz="US/Eastern"), - ], - "B": [0, np.nan, 2], - } - ) - tm.assert_frame_equal(result, expected) +def test_replace_doesnt_replace_without_regex(): + raw = """fol T_opp T_Dir T_Enh + 0 1 0 0 vo + 1 2 vr 0 0 + 2 2 0 0 0 + 3 3 0 bt 0""" + df = pd.read_csv(StringIO(raw), sep=r"\s+") + res = df.replace({r"\D": 1}) + tm.assert_frame_equal(df, res) - def test_replace_with_empty_dictlike(self, mix_abc): - # GH 15289 - df = DataFrame(mix_abc) - tm.assert_frame_equal(df, df.replace({})) - tm.assert_frame_equal(df, df.replace(Series([], dtype=object))) - tm.assert_frame_equal(df, df.replace({"b": {}})) 
- tm.assert_frame_equal(df, df.replace(Series({"b": {}}))) +def test_replace_bool_with_string(): + df = DataFrame({"a": [True, False], "b": list("ab")}) + result = df.replace(True, "a") + expected = DataFrame({"a": ["a", False], "b": df.b}) + tm.assert_frame_equal(result, expected) - @pytest.mark.parametrize( - "to_replace, method, expected", - [ - (0, "bfill", {"A": [1, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}), - ( - np.nan, - "bfill", - {"A": [0, 1, 2], "B": [5.0, 7.0, 7.0], "C": ["a", "b", "c"]}, - ), - ("d", "ffill", {"A": [0, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}), - ( - [0, 2], - "bfill", - {"A": [1, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}, - ), - ( - [1, 2], - "pad", - {"A": [0, 0, 0], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}, - ), - ( - (1, 2), - "bfill", - {"A": [0, 2, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}, - ), - ( - ["b", "c"], - "ffill", - {"A": [0, 1, 2], "B": [5, np.nan, 7], "C": ["a", "a", "a"]}, - ), - ], - ) - def test_replace_method(self, to_replace, method, expected): - # GH 19632 - df = DataFrame({"A": [0, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}) - result = df.replace(to_replace=to_replace, value=None, method=method) - expected = DataFrame(expected) - tm.assert_frame_equal(result, expected) +def test_replace_pure_bool_with_string_no_op(): + df = DataFrame(np.random.rand(2, 2) > 0.5) + result = df.replace("asdf", "fdsa") + tm.assert_frame_equal(df, result) - @pytest.mark.parametrize( - "replace_dict, final_data", - [({"a": 1, "b": 1}, [[3, 3], [2, 2]]), ({"a": 1, "b": 2}, [[3, 1], [2, 3]])], - ) - def test_categorical_replace_with_dict(self, replace_dict, final_data): - # GH 26988 - df = DataFrame([[1, 1], [2, 2]], columns=["a", "b"], dtype="category") - final_data = np.array(final_data) +def test_replace_bool_with_bool(): + df = DataFrame(np.random.rand(2, 2) > 0.5) + result = df.replace(False, True) + expected = DataFrame(np.ones((2, 2), dtype=bool)) + tm.assert_frame_equal(result, expected) 
- a = pd.Categorical(final_data[:, 0], categories=[3, 2]) - ex_cat = [3, 2] if replace_dict["b"] == 1 else [1, 3] - b = pd.Categorical(final_data[:, 1], categories=ex_cat) +def test_replace_with_dict_with_bool_keys(): + df = DataFrame({0: [True, False], 1: [False, True]}) + result = df.replace({"asdf": "asdb", True: "yes"}) + expected = DataFrame({0: ["yes", False], 1: [False, "yes"]}) + tm.assert_frame_equal(result, expected) + + +def test_replace_dict_strings_vs_ints(): + # GH#34789 + df = DataFrame({"Y0": [1, 2], "Y1": [3, 4]}) + result = df.replace({"replace_string": "test"}) + + tm.assert_frame_equal(result, df) + + result = df["Y0"].replace({"replace_string": "test"}) + tm.assert_series_equal(result, df["Y0"]) + + +def test_replace_truthy(): + df = DataFrame({"a": [True, True]}) + r = df.replace([np.inf, -np.inf], np.nan) + e = df + tm.assert_frame_equal(r, e) + + +def test_nested_dict_overlapping_keys_replace_int(): + # GH 27660 keep behaviour consistent for simple dictionary and + # nested dictionary replacement + df = DataFrame({"a": list(range(1, 5))}) + + result = df.replace({"a": dict(zip(range(1, 5), range(2, 6)))}) + expected = df.replace(dict(zip(range(1, 5), range(2, 6)))) + tm.assert_frame_equal(result, expected) + + +def test_nested_dict_overlapping_keys_replace_str(): + # GH 27660 + a = np.arange(1, 5) + astr = a.astype(str) + bstr = np.arange(2, 6).astype(str) + df = DataFrame({"a": astr}) + result = df.replace(dict(zip(astr, bstr))) + expected = df.replace({"a": dict(zip(astr, bstr))}) + tm.assert_frame_equal(result, expected) - expected = DataFrame({"a": a, "b": b}) - result = df.replace(replace_dict, 3) - tm.assert_frame_equal(result, expected) - msg = ( - r"Attributes of DataFrame.iloc\[:, 0\] \(column name=\"a\"\) are " - "different" - ) - with pytest.raises(AssertionError, match=msg): - # ensure non-inplace call does not affect original - tm.assert_frame_equal(df, expected) - return_value = df.replace(replace_dict, 3, inplace=True) - 
assert return_value is None - tm.assert_frame_equal(df, expected) - @pytest.mark.parametrize( - "df, to_replace, exp", +def test_replace_swapping_bug(): + df = DataFrame({"a": [True, False, True]}) + res = df.replace({"a": {True: "Y", False: "N"}}) + expect = DataFrame({"a": ["Y", "N", "Y"]}) + tm.assert_frame_equal(res, expect) + + df = DataFrame({"a": [0, 1, 0]}) + res = df.replace({"a": {0: "Y", 1: "N"}}) + expect = DataFrame({"a": ["Y", "N", "Y"]}) + tm.assert_frame_equal(res, expect) + + +def test_replace_period(): + d = { + "fname": { + "out_augmented_AUG_2011.json": pd.Period(year=2011, month=8, freq="M"), + "out_augmented_JAN_2011.json": pd.Period(year=2011, month=1, freq="M"), + "out_augmented_MAY_2012.json": pd.Period(year=2012, month=5, freq="M"), + "out_augmented_SUBSIDY_WEEK.json": pd.Period(year=2011, month=4, freq="M"), + "out_augmented_AUG_2012.json": pd.Period(year=2012, month=8, freq="M"), + "out_augmented_MAY_2011.json": pd.Period(year=2011, month=5, freq="M"), + "out_augmented_SEP_2013.json": pd.Period(year=2013, month=9, freq="M"), + } + } + + df = DataFrame( [ - ( - {"col1": [1, 2, 3], "col2": [4, 5, 6]}, - {4: 5, 5: 6, 6: 7}, - {"col1": [1, 2, 3], "col2": [5, 6, 7]}, - ), - ( - {"col1": [1, 2, 3], "col2": ["4", "5", "6"]}, - {"4": "5", "5": "6", "6": "7"}, - {"col1": [1, 2, 3], "col2": ["5", "6", "7"]}, - ), + "out_augmented_AUG_2012.json", + "out_augmented_SEP_2013.json", + "out_augmented_SUBSIDY_WEEK.json", + "out_augmented_MAY_2012.json", + "out_augmented_MAY_2011.json", + "out_augmented_AUG_2011.json", + "out_augmented_JAN_2011.json", ], + columns=["fname"], ) - def test_replace_commutative(self, df, to_replace, exp): - # GH 16051 - # DataFrame.replace() overwrites when values are non-numeric - # also added to data frame whilst issue was for series - - df = DataFrame(df) - - expected = DataFrame(exp) - result = df.replace(to_replace) - tm.assert_frame_equal(result, expected) + assert set(df.fname.values) == set(d["fname"].keys()) + # We 
don't support converting object -> specialized EA in + # replace yet. + expected = DataFrame( + {"fname": [d["fname"][k] for k in df.fname.values]}, dtype=object + ) + result = df.replace(d) + tm.assert_frame_equal(result, expected) + + +def test_replace_datetime(): + d = { + "fname": { + "out_augmented_AUG_2011.json": Timestamp("2011-08"), + "out_augmented_JAN_2011.json": Timestamp("2011-01"), + "out_augmented_MAY_2012.json": Timestamp("2012-05"), + "out_augmented_SUBSIDY_WEEK.json": Timestamp("2011-04"), + "out_augmented_AUG_2012.json": Timestamp("2012-08"), + "out_augmented_MAY_2011.json": Timestamp("2011-05"), + "out_augmented_SEP_2013.json": Timestamp("2013-09"), + } + } - @pytest.mark.parametrize( - "replacer", + df = DataFrame( [ - Timestamp("20170827"), - np.int8(1), - np.int16(1), - np.float32(1), - np.float64(1), + "out_augmented_AUG_2012.json", + "out_augmented_SEP_2013.json", + "out_augmented_SUBSIDY_WEEK.json", + "out_augmented_MAY_2012.json", + "out_augmented_MAY_2011.json", + "out_augmented_AUG_2011.json", + "out_augmented_JAN_2011.json", ], + columns=["fname"], ) - def test_replace_replacer_dtype(self, request, replacer): - # GH26632 - if np.isscalar(replacer) and replacer.dtype.itemsize < 8: - request.node.add_marker( - pytest.mark.xfail( - np_version_under1p20, reason="np.putmask doesn't coerce dtype" - ) - ) - df = DataFrame(["a"]) - result = df.replace({"a": replacer, "b": replacer}) - expected = DataFrame([replacer]) - tm.assert_frame_equal(result, expected) - - def test_replace_after_convert_dtypes(self): - # GH31517 - df = DataFrame({"grp": [1, 2, 3, 4, 5]}, dtype="Int64") - result = df.replace(1, 10) - expected = DataFrame({"grp": [10, 2, 3, 4, 5]}, dtype="Int64") - tm.assert_frame_equal(result, expected) - - def test_replace_invalid_to_replace(self): - # GH 18634 - # API: replace() should raise an exception if invalid argument is given - df = DataFrame({"one": ["a", "b ", "c"], "two": ["d ", "e ", "f "]}) - msg = ( - r"Expecting 
'to_replace' to be either a scalar, array-like, " - r"dict or None, got invalid type.*" - ) - with pytest.raises(TypeError, match=msg): - df.replace(lambda x: x.strip()) - - @pytest.mark.parametrize("dtype", ["float", "float64", "int64", "Int64", "boolean"]) - @pytest.mark.parametrize("value", [np.nan, pd.NA]) - def test_replace_no_replacement_dtypes(self, dtype, value): - # https://github.com/pandas-dev/pandas/issues/32988 - df = DataFrame(np.eye(2), dtype=dtype) - result = df.replace(to_replace=[None, -np.inf, np.inf], value=value) - tm.assert_frame_equal(result, df) - - @pytest.mark.parametrize("replacement", [np.nan, 5]) - def test_replace_with_duplicate_columns(self, replacement): - # GH 24798 - result = DataFrame({"A": [1, 2, 3], "A1": [4, 5, 6], "B": [7, 8, 9]}) - result.columns = list("AAB") - - expected = DataFrame( - {"A": [1, 2, 3], "A1": [4, 5, 6], "B": [replacement, 8, 9]} - ) - expected.columns = list("AAB") - - result["B"] = result["B"].replace(7, replacement) - - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize("value", [pd.Period("2020-01"), pd.Interval(0, 5)]) - def test_replace_ea_ignore_float(self, frame_or_series, value): - # GH#34871 - obj = DataFrame({"Per": [value] * 3}) - if frame_or_series is not DataFrame: - obj = obj["Per"] - - expected = obj.copy() - result = obj.replace(1.0, 0.0) - tm.assert_equal(expected, result) - - def test_replace_value_category_type(self): - """ - Test for #23305: to ensure category dtypes are maintained - after replace with direct values - """ - - # create input data - input_dict = { - "col1": [1, 2, 3, 4], - "col2": ["a", "b", "c", "d"], - "col3": [1.5, 2.5, 3.5, 4.5], - "col4": ["cat1", "cat2", "cat3", "cat4"], - "col5": ["obj1", "obj2", "obj3", "obj4"], + assert set(df.fname.values) == set(d["fname"].keys()) + expected = DataFrame({"fname": [d["fname"][k] for k in df.fname.values]}) + result = df.replace(d) + tm.assert_frame_equal(result, expected) + + +def test_replace_datetimetz(): + + # 
GH 11326 + # behaving poorly when presented with a datetime64[ns, tz] + df = DataFrame( + { + "A": date_range("20130101", periods=3, tz="US/Eastern"), + "B": [0, np.nan, 2], } - # explicitly cast columns as category and order them - input_df = DataFrame(data=input_dict).astype( - {"col2": "category", "col4": "category"} - ) - input_df["col2"] = input_df["col2"].cat.reorder_categories( - ["a", "b", "c", "d"], ordered=True - ) - input_df["col4"] = input_df["col4"].cat.reorder_categories( - ["cat1", "cat2", "cat3", "cat4"], ordered=True - ) + ) + result = df.replace(np.nan, 1) + expected = DataFrame( + { + "A": date_range("20130101", periods=3, tz="US/Eastern"), + "B": Series([0, 1, 2], dtype="float64"), + } + ) + tm.assert_frame_equal(result, expected) - # create expected dataframe - expected_dict = { - "col1": [1, 2, 3, 4], - "col2": ["a", "b", "c", "z"], - "col3": [1.5, 2.5, 3.5, 4.5], - "col4": ["cat1", "catX", "cat3", "cat4"], - "col5": ["obj9", "obj2", "obj3", "obj4"], + result = df.fillna(1) + tm.assert_frame_equal(result, expected) + + result = df.replace(0, np.nan) + expected = DataFrame( + { + "A": date_range("20130101", periods=3, tz="US/Eastern"), + "B": [np.nan, np.nan, 2], } - # explicitly cast columns as category and order them - expected = DataFrame(data=expected_dict).astype( - {"col2": "category", "col4": "category"} - ) - expected["col2"] = expected["col2"].cat.reorder_categories( - ["a", "b", "c", "z"], ordered=True - ) - expected["col4"] = expected["col4"].cat.reorder_categories( - ["cat1", "catX", "cat3", "cat4"], ordered=True + ) + tm.assert_frame_equal(result, expected) + + result = df.replace( + Timestamp("20130102", tz="US/Eastern"), + Timestamp("20130104", tz="US/Eastern"), + ) + expected = DataFrame( + { + "A": [ + Timestamp("20130101", tz="US/Eastern"), + Timestamp("20130104", tz="US/Eastern"), + Timestamp("20130103", tz="US/Eastern"), + ], + "B": [0, np.nan, 2], + } + ) + tm.assert_frame_equal(result, expected) + + result = df.copy() + 
result.iloc[1, 0] = np.nan + result = result.replace({"A": pd.NaT}, Timestamp("20130104", tz="US/Eastern")) + tm.assert_frame_equal(result, expected) + + # coerce to object + result = df.copy() + result.iloc[1, 0] = np.nan + result = result.replace({"A": pd.NaT}, Timestamp("20130104", tz="US/Pacific")) + expected = DataFrame( + { + "A": [ + Timestamp("20130101", tz="US/Eastern"), + Timestamp("20130104", tz="US/Pacific"), + Timestamp("20130103", tz="US/Eastern"), + ], + "B": [0, np.nan, 2], + } + ) + tm.assert_frame_equal(result, expected) + + result = df.copy() + result.iloc[1, 0] = np.nan + result = result.replace({"A": np.nan}, Timestamp("20130104")) + expected = DataFrame( + { + "A": [ + Timestamp("20130101", tz="US/Eastern"), + Timestamp("20130104"), + Timestamp("20130103", tz="US/Eastern"), + ], + "B": [0, np.nan, 2], + } + ) + tm.assert_frame_equal(result, expected) + + +def test_replace_with_empty_dictlike(mix_abc): + # GH 15289 + df = DataFrame(mix_abc) + tm.assert_frame_equal(df, df.replace({})) + tm.assert_frame_equal(df, df.replace(Series([], dtype=object))) + + tm.assert_frame_equal(df, df.replace({"b": {}})) + tm.assert_frame_equal(df, df.replace(Series({"b": {}}))) + + +@pytest.mark.parametrize( + "to_replace, method, expected", + [ + (0, "bfill", {"A": [1, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}), + ( + np.nan, + "bfill", + {"A": [0, 1, 2], "B": [5.0, 7.0, 7.0], "C": ["a", "b", "c"]}, + ), + ("d", "ffill", {"A": [0, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}), + ( + [0, 2], + "bfill", + {"A": [1, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}, + ), + ( + [1, 2], + "pad", + {"A": [0, 0, 0], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}, + ), + ( + (1, 2), + "bfill", + {"A": [0, 2, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}, + ), + ( + ["b", "c"], + "ffill", + {"A": [0, 1, 2], "B": [5, np.nan, 7], "C": ["a", "a", "a"]}, + ), + ], +) +def test_replace_method(to_replace, method, expected): + # GH 19632 + df = DataFrame({"A": [0, 1, 
2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}) + + result = df.replace(to_replace=to_replace, value=None, method=method) + expected = DataFrame(expected) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "replace_dict, final_data", + [({"a": 1, "b": 1}, [[3, 3], [2, 2]]), ({"a": 1, "b": 2}, [[3, 1], [2, 3]])], +) +def test_categorical_replace_with_dict(replace_dict, final_data): + # GH 26988 + df = DataFrame([[1, 1], [2, 2]], columns=["a", "b"], dtype="category") + + final_data = np.array(final_data) + + a = pd.Categorical(final_data[:, 0], categories=[3, 2]) + + ex_cat = [3, 2] if replace_dict["b"] == 1 else [1, 3] + b = pd.Categorical(final_data[:, 1], categories=ex_cat) + + expected = DataFrame({"a": a, "b": b}) + result = df.replace(replace_dict, 3) + tm.assert_frame_equal(result, expected) + msg = r"Attributes of DataFrame.iloc\[:, 0\] \(column name=\"a\"\) are different" + with pytest.raises(AssertionError, match=msg): + # ensure non-inplace call does not affect original + tm.assert_frame_equal(df, expected) + return_value = df.replace(replace_dict, 3, inplace=True) + assert return_value is None + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize( + "df, to_replace, exp", + [ + ( + {"col1": [1, 2, 3], "col2": [4, 5, 6]}, + {4: 5, 5: 6, 6: 7}, + {"col1": [1, 2, 3], "col2": [5, 6, 7]}, + ), + ( + {"col1": [1, 2, 3], "col2": ["4", "5", "6"]}, + {"4": "5", "5": "6", "6": "7"}, + {"col1": [1, 2, 3], "col2": ["5", "6", "7"]}, + ), + ], +) +def test_replace_commutative(df, to_replace, exp): + # GH 16051 + # DataFrame.replace() overwrites when values are non-numeric + # also added to data frame whilst issue was for series + + df = DataFrame(df) + + expected = DataFrame(exp) + result = df.replace(to_replace) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "replacer", + [ + Timestamp("20170827"), + np.int8(1), + np.int16(1), + np.float32(1), + np.float64(1), + ], +) +def 
test_replace_replacer_dtype(request, replacer): + # GH26632 + if np.isscalar(replacer) and replacer.dtype.itemsize < 8: + request.node.add_marker( + pytest.mark.xfail( + np_version_under1p20, reason="np.putmask doesn't coerce dtype" + ) ) + df = DataFrame(["a"]) + result = df.replace({"a": replacer, "b": replacer}) + expected = DataFrame([replacer]) + tm.assert_frame_equal(result, expected) - # replace values in input dataframe - input_df = input_df.replace("d", "z") - input_df = input_df.replace("obj1", "obj9") - result = input_df.replace("cat2", "catX") - tm.assert_frame_equal(result, expected) +def test_replace_after_convert_dtypes(): + # GH31517 + df = DataFrame({"grp": [1, 2, 3, 4, 5]}, dtype="Int64") + result = df.replace(1, 10) + expected = DataFrame({"grp": [10, 2, 3, 4, 5]}, dtype="Int64") + tm.assert_frame_equal(result, expected) - @pytest.mark.xfail( - reason="category dtype gets changed to object type after replace, see #35268", + +def test_replace_invalid_to_replace(): + # GH 18634 + # API: replace() should raise an exception if invalid argument is given + df = DataFrame({"one": ["a", "b ", "c"], "two": ["d ", "e ", "f "]}) + msg = ( + r"Expecting 'to_replace' to be either a scalar, array-like, " + r"dict or None, got invalid type.*" + ) + with pytest.raises(TypeError, match=msg): + df.replace(lambda x: x.strip()) + + +@pytest.mark.parametrize("dtype", ["float", "float64", "int64", "Int64", "boolean"]) +@pytest.mark.parametrize("value", [np.nan, pd.NA]) +def test_replace_no_replacement_dtypes(dtype, value): + # https://github.com/pandas-dev/pandas/issues/32988 + df = DataFrame(np.eye(2), dtype=dtype) + result = df.replace(to_replace=[None, -np.inf, np.inf], value=value) + tm.assert_frame_equal(result, df) + + +@pytest.mark.parametrize("replacement", [np.nan, 5]) +def test_replace_with_duplicate_columns(replacement): + # GH 24798 + result = DataFrame({"A": [1, 2, 3], "A1": [4, 5, 6], "B": [7, 8, 9]}) + result.columns = list("AAB") + + expected = 
DataFrame({"A": [1, 2, 3], "A1": [4, 5, 6], "B": [replacement, 8, 9]}) + expected.columns = list("AAB") + + result["B"] = result["B"].replace(7, replacement) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("value", [pd.Period("2020-01"), pd.Interval(0, 5)]) +def test_replace_ea_ignore_float(frame_or_series, value): + # GH#34871 + obj = DataFrame({"Per": [value] * 3}) + if frame_or_series is not DataFrame: + obj = obj["Per"] + + expected = obj.copy() + result = obj.replace(1.0, 0.0) + tm.assert_equal(expected, result) + + +def test_replace_value_category_type(): + """ + Test for #23305: to ensure category dtypes are maintained + after replace with direct values + """ + + # create input data + input_dict = { + "col1": [1, 2, 3, 4], + "col2": ["a", "b", "c", "d"], + "col3": [1.5, 2.5, 3.5, 4.5], + "col4": ["cat1", "cat2", "cat3", "cat4"], + "col5": ["obj1", "obj2", "obj3", "obj4"], + } + # explicitly cast columns as category and order them + input_df = DataFrame(data=input_dict).astype( + {"col2": "category", "col4": "category"} + ) + input_df["col2"] = input_df["col2"].cat.reorder_categories( + ["a", "b", "c", "d"], ordered=True + ) + input_df["col4"] = input_df["col4"].cat.reorder_categories( + ["cat1", "cat2", "cat3", "cat4"], ordered=True ) - def test_replace_dict_category_type(self, input_category_df, expected_category_df): - """ - Test to ensure category dtypes are maintained - after replace with dict values - """ - # create input dataframe - input_dict = {"col1": ["a"], "col2": ["obj1"], "col3": ["cat1"]} - # explicitly cast columns as category - input_df = DataFrame(data=input_dict).astype( - {"col1": "category", "col2": "category", "col3": "category"} - ) + # create expected dataframe + expected_dict = { + "col1": [1, 2, 3, 4], + "col2": ["a", "b", "c", "z"], + "col3": [1.5, 2.5, 3.5, 4.5], + "col4": ["cat1", "catX", "cat3", "cat4"], + "col5": ["obj9", "obj2", "obj3", "obj4"], + } + # explicitly cast columns as category and order 
them + expected = DataFrame(data=expected_dict).astype( + {"col2": "category", "col4": "category"} + ) + expected["col2"] = expected["col2"].cat.reorder_categories( + ["a", "b", "c", "z"], ordered=True + ) + expected["col4"] = expected["col4"].cat.reorder_categories( + ["cat1", "catX", "cat3", "cat4"], ordered=True + ) + + # replace values in input dataframe + input_df = input_df.replace("d", "z") + input_df = input_df.replace("obj1", "obj9") + result = input_df.replace("cat2", "catX") + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.xfail( + reason="category dtype gets changed to object type after replace, see #35268", +) +def test_replace_dict_category_type(input_category_df, expected_category_df): + """ + Test to ensure category dtypes are maintained + after replace with dict values + """ + + # create input dataframe + input_dict = {"col1": ["a"], "col2": ["obj1"], "col3": ["cat1"]} + # explicitly cast columns as category + input_df = DataFrame(data=input_dict).astype( + {"col1": "category", "col2": "category", "col3": "category"} + ) + + # create expected dataframe + expected_dict = {"col1": ["z"], "col2": ["obj9"], "col3": ["catX"]} + # explicitly cast columns as category + expected = DataFrame(data=expected_dict).astype( + {"col1": "category", "col2": "category", "col3": "category"} + ) + + # replace values in input dataframe using a dict + result = input_df.replace({"a": "z", "obj1": "obj9", "cat1": "catX"}) + + tm.assert_frame_equal(result, expected) + + +def test_replace_with_compiled_regex(): + # https://github.com/pandas-dev/pandas/issues/35680 + df = DataFrame(["a", "b", "c"]) + regex = re.compile("^a$") + result = df.replace({regex: "z"}, regex=True) + expected = DataFrame(["z", "b", "c"]) + tm.assert_frame_equal(result, expected) + + +def test_replace_intervals(): + # https://github.com/pandas-dev/pandas/issues/35931 + df = DataFrame({"a": [pd.Interval(0, 1), pd.Interval(0, 1)]}) + result = df.replace({"a": {pd.Interval(0, 1): "x"}}) + 
expected = DataFrame({"a": ["x", "x"]}) + tm.assert_frame_equal(result, expected) + + +def test_replace_unicode(): + # GH: 16784 + columns_values_map = {"positive": {"正面": 1, "中立": 1, "负面": 0}} + df1 = DataFrame({"positive": np.ones(3)}) + result = df1.replace(columns_values_map) + expected = DataFrame({"positive": np.ones(3)}) + tm.assert_frame_equal(result, expected) - # create expected dataframe - expected_dict = {"col1": ["z"], "col2": ["obj9"], "col3": ["catX"]} - # explicitly cast columns as category - expected = DataFrame(data=expected_dict).astype( - {"col1": "category", "col2": "category", "col3": "category"} - ) - # replace values in input dataframe using a dict - result = input_df.replace({"a": "z", "obj1": "obj9", "cat1": "catX"}) - - tm.assert_frame_equal(result, expected) - - def test_replace_with_compiled_regex(self): - # https://github.com/pandas-dev/pandas/issues/35680 - df = DataFrame(["a", "b", "c"]) - regex = re.compile("^a$") - result = df.replace({regex: "z"}, regex=True) - expected = DataFrame(["z", "b", "c"]) - tm.assert_frame_equal(result, expected) - - def test_replace_intervals(self): - # https://github.com/pandas-dev/pandas/issues/35931 - df = DataFrame({"a": [pd.Interval(0, 1), pd.Interval(0, 1)]}) - result = df.replace({"a": {pd.Interval(0, 1): "x"}}) - expected = DataFrame({"a": ["x", "x"]}) - tm.assert_frame_equal(result, expected) - - def test_replace_unicode(self): - # GH: 16784 - columns_values_map = {"positive": {"正面": 1, "中立": 1, "负面": 0}} - df1 = DataFrame({"positive": np.ones(3)}) - result = df1.replace(columns_values_map) - expected = DataFrame({"positive": np.ones(3)}) - tm.assert_frame_equal(result, expected) - - def test_replace_bytes(self, frame_or_series): - # GH#38900 - obj = frame_or_series(["o"]).astype("|S") - expected = obj.copy() - obj = obj.replace({None: np.nan}) - tm.assert_equal(obj, expected) +def test_replace_bytes(frame_or_series): + # GH#38900 + obj = frame_or_series(["o"]).astype("|S") + expected = 
obj.copy() + obj = obj.replace({None: np.nan}) + tm.assert_equal(obj, expected)
Class doesn't seem to have any purpose here
https://api.github.com/repos/pandas-dev/pandas/pulls/41345
2021-05-06T02:59:37Z
2021-05-06T04:06:14Z
null
2021-05-06T19:09:26Z
REF: _cython_transform operate blockwise
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 8637d50745195..7a286188c4e74 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -1136,19 +1136,24 @@ def group_rank(float64_t[:, ::1] out, This method modifies the `out` parameter rather than returning an object """ cdef: + Py_ssize_t i, k, N ndarray[float64_t, ndim=1] result - result = rank_1d( - values=values[:, 0], - labels=labels, - is_datetimelike=is_datetimelike, - ties_method=ties_method, - ascending=ascending, - pct=pct, - na_option=na_option - ) - for i in range(len(result)): - out[i, 0] = result[i] + N = values.shape[1] + + for k in range(N): + result = rank_1d( + values=values[:, k], + labels=labels, + is_datetimelike=is_datetimelike, + ties_method=ties_method, + ascending=ascending, + pct=pct, + na_option=na_option + ) + for i in range(len(result)): + # TODO: why cant we do out[:, k] = result? + out[i, k] = result[i] # ---------------------------------------------------------------------- diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 18506b871bda6..c394390f051de 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -530,6 +530,26 @@ def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs ) + def _cython_transform( + self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs + ): + assert axis == 0 # handled by caller + + obj = self._selected_obj + + is_numeric = is_numeric_dtype(obj.dtype) + if numeric_only and not is_numeric: + raise DataError("No numeric types to aggregate") + + try: + result = self.grouper._cython_operation( + "transform", obj._values, how, axis, **kwargs + ) + except (NotImplementedError, TypeError): + raise DataError("No numeric types to aggregate") + + return obj._constructor(result, index=self.obj.index, name=obj.name) + def _transform_general(self, func: Callable, *args, 
**kwargs) -> Series: """ Transform with a callable func`. @@ -1258,6 +1278,36 @@ def _wrap_applied_output_series( return self._reindex_output(result) + def _cython_transform( + self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs + ) -> DataFrame: + assert axis == 0 # handled by caller + # TODO: no tests with self.ndim == 1 for DataFrameGroupBy + + # With self.axis == 0, we have multi-block tests + # e.g. test_rank_min_int, test_cython_transform_frame + # test_transform_numeric_ret + # With self.axis == 1, _get_data_to_aggregate does a transpose + # so we always have a single block. + mgr: Manager2D = self._get_data_to_aggregate() + if numeric_only: + mgr = mgr.get_numeric_data(copy=False) + + def arr_func(bvalues: ArrayLike) -> ArrayLike: + return self.grouper._cython_operation( + "transform", bvalues, how, 1, **kwargs + ) + + # We could use `mgr.apply` here and not have to set_axis, but + # we would have to do shape gymnastics for ArrayManager compat + res_mgr = mgr.grouped_reduce(arr_func, ignore_failures=True) + res_mgr.set_axis(1, mgr.axes[1]) + + res_df = self.obj._constructor(res_mgr) + if self.axis == 1: + res_df = res_df.T + return res_df + def _transform_general(self, func, *args, **kwargs): from pandas.core.reshape.concat import concat diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index c5ef18c51a533..0d2be53dc3e0e 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1361,32 +1361,10 @@ def _cython_agg_general( ): raise AbstractMethodError(self) - @final def _cython_transform( self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs ): - output: dict[base.OutputKey, ArrayLike] = {} - - for idx, obj in enumerate(self._iterate_slices()): - name = obj.name - is_numeric = is_numeric_dtype(obj.dtype) - if numeric_only and not is_numeric: - continue - - try: - result = self.grouper._cython_operation( - "transform", obj._values, how, axis, **kwargs - ) - except 
(NotImplementedError, TypeError): - continue - - key = base.OutputKey(label=name, position=idx) - output[key] = result - - if not output: - raise DataError("No numeric types to aggregate") - - return self._wrap_transformed_output(output) + raise AbstractMethodError(self) @final def _transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): diff --git a/pandas/tests/apply/test_frame_transform.py b/pandas/tests/apply/test_frame_transform.py index d56c8c1e83ab4..18c36e4096b2b 100644 --- a/pandas/tests/apply/test_frame_transform.py +++ b/pandas/tests/apply/test_frame_transform.py @@ -51,6 +51,19 @@ def test_transform_groupby_kernel(axis, float_frame, op, request): result = float_frame.transform(op, axis, *args) tm.assert_frame_equal(result, expected) + # same thing, but ensuring we have multiple blocks + assert "E" not in float_frame.columns + float_frame["E"] = float_frame["A"].copy() + assert len(float_frame._mgr.arrays) > 1 + + if axis == 0 or axis == "index": + ones = np.ones(float_frame.shape[0]) + else: + ones = np.ones(float_frame.shape[1]) + expected2 = float_frame.groupby(ones, axis=axis).transform(op, *args) + result2 = float_frame.transform(op, axis, *args) + tm.assert_frame_equal(result2, expected2) + @pytest.mark.parametrize( "ops, names", diff --git a/pandas/tests/groupby/test_rank.py b/pandas/tests/groupby/test_rank.py index 20edf03c5b96c..aafdffba43388 100644 --- a/pandas/tests/groupby/test_rank.py +++ b/pandas/tests/groupby/test_rank.py @@ -584,21 +584,23 @@ def test_rank_multiindex(): # GH27721 df = concat( { - "a": DataFrame({"col1": [1, 2], "col2": [3, 4]}), + "a": DataFrame({"col1": [3, 4], "col2": [1, 2]}), "b": DataFrame({"col3": [5, 6], "col4": [7, 8]}), }, axis=1, ) - result = df.groupby(level=0, axis=1).rank(axis=1, ascending=False, method="first") + gb = df.groupby(level=0, axis=1) + result = gb.rank(axis=1) + expected = concat( - { - "a": DataFrame({"col1": [2.0, 2.0], "col2": [1.0, 1.0]}), - "b": DataFrame({"col3": [2.0, 
2.0], "col4": [1.0, 1.0]}), - }, + [ + df["a"].rank(axis=1), + df["b"].rank(axis=1), + ], axis=1, + keys=["a", "b"], ) - tm.assert_frame_equal(result, expected) @@ -615,3 +617,24 @@ def test_groupby_axis0_rank_axis1(): # This should match what we get when "manually" operating group-by-group expected = concat([df.loc["a"].rank(axis=1), df.loc["b"].rank(axis=1)], axis=0) tm.assert_frame_equal(res, expected) + + # check that we haven't accidentally written a case that coincidentally + # matches rank(axis=0) + alt = gb.rank(axis=0) + assert not alt.equals(expected) + + +def test_groupby_axis0_cummax_axis1(): + # case where groupby axis is 0 and axis keyword in transform is 1 + + # df has mixed dtype -> multiple blocks + df = DataFrame( + {0: [1, 3, 5, 7], 1: [2, 4, 6, 8], 2: [1.5, 3.5, 5.5, 7.5]}, + index=["a", "a", "b", "b"], + ) + gb = df.groupby(level=0, axis=0) + + cmax = gb.cummax(axis=1) + expected = df[[0, 1]].astype(np.float64) + expected[2] = expected[1] + tm.assert_frame_equal(cmax, expected)
In the process of doing this found that group_rank is silently assuming it is single-column.
https://api.github.com/repos/pandas-dev/pandas/pulls/41344
2021-05-06T02:51:33Z
2021-05-10T14:34:45Z
2021-05-10T14:34:44Z
2021-05-10T14:39:19Z
BUG: replace with regex raising for StringDType
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 5adc8540e6864..7ec74b7045437 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -748,7 +748,7 @@ Strings ^^^^^^^ - Bug in the conversion from ``pyarrow.ChunkedArray`` to :class:`~arrays.StringArray` when the original had zero chunks (:issue:`41040`) -- +- Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` ignoring replacements with ``regex=True`` for ``StringDType`` data (:issue:`41333`, :issue:`35977`) Interval ^^^^^^^^ diff --git a/pandas/conftest.py b/pandas/conftest.py index 7b29c41ef70f5..f948dc11bc014 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -1153,6 +1153,27 @@ def object_dtype(request): return request.param +@pytest.fixture( + params=[ + "object", + "string", + pytest.param( + "arrow_string", marks=td.skip_if_no("pyarrow", min_version="1.0.0") + ), + ] +) +def any_string_dtype(request): + """ + Parametrized fixture for string dtypes. + * 'object' + * 'string' + * 'arrow_string' + """ + from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401 + + return request.param + + @pytest.fixture(params=tm.DATETIME64_DTYPES) def datetime64_dtype(request): """ diff --git a/pandas/core/array_algos/replace.py b/pandas/core/array_algos/replace.py index 201b9fdcc51cc..2d3a168a31e1e 100644 --- a/pandas/core/array_algos/replace.py +++ b/pandas/core/array_algos/replace.py @@ -149,7 +149,7 @@ def re_replacer(s): else: return s - f = np.vectorize(re_replacer, otypes=[values.dtype]) + f = np.vectorize(re_replacer, otypes=[np.object_]) if mask is None: values[:] = f(values) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 92f9d803d1ebe..bd4dfdb4ebad0 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -49,6 +49,7 @@ is_extension_array_dtype, is_list_like, is_sparse, + is_string_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import ( @@ -788,7 
+789,7 @@ def _replace_list( src_len = len(pairs) - 1 - if values.dtype == _dtype_obj: + if is_string_dtype(values): # Calculate the mask once, prior to the call of comp # in order to avoid repeating the same computations mask = ~isna(values) diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index e6ed60dc2bb08..3ffaf67c656d9 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -563,10 +563,11 @@ def test_regex_replace_dict_nested(self, mix_abc): tm.assert_frame_equal(res3, expec) tm.assert_frame_equal(res4, expec) - def test_regex_replace_dict_nested_non_first_character(self): + def test_regex_replace_dict_nested_non_first_character(self, any_string_dtype): # GH 25259 - df = DataFrame({"first": ["abc", "bca", "cab"]}) - expected = DataFrame({"first": [".bc", "bc.", "c.b"]}) + dtype = any_string_dtype + df = DataFrame({"first": ["abc", "bca", "cab"]}, dtype=dtype) + expected = DataFrame({"first": [".bc", "bc.", "c.b"]}, dtype=dtype) result = df.replace({"a": "."}, regex=True) tm.assert_frame_equal(result, expected) @@ -685,6 +686,24 @@ def test_replace_regex_metachar(self, metachar): expected = DataFrame({"a": ["paren", "else"]}) tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize( + "data,to_replace,expected", + [ + (["xax", "xbx"], {"a": "c", "b": "d"}, ["xcx", "xdx"]), + (["d", "", ""], {r"^\s*$": pd.NA}, ["d", pd.NA, pd.NA]), + ], + ) + def test_regex_replace_string_types( + self, data, to_replace, expected, frame_or_series, any_string_dtype + ): + # GH-41333, GH-35977 + dtype = any_string_dtype + obj = frame_or_series(data, dtype=dtype) + result = obj.replace(to_replace, regex=True) + expected = frame_or_series(expected, dtype=dtype) + + tm.assert_equal(result, expected) + def test_replace(self, datetime_frame): datetime_frame["A"][:5] = np.nan datetime_frame["A"][-5:] = np.nan diff --git a/pandas/tests/strings/conftest.py 
b/pandas/tests/strings/conftest.py index 17703d970e29e..4fedbee91f649 100644 --- a/pandas/tests/strings/conftest.py +++ b/pandas/tests/strings/conftest.py @@ -1,8 +1,6 @@ import numpy as np import pytest -import pandas.util._test_decorators as td - from pandas import Series from pandas.core import strings as strings @@ -175,24 +173,3 @@ def any_allowed_skipna_inferred_dtype(request): # correctness of inference tested in tests/dtypes/test_inference.py return inferred_dtype, values - - -@pytest.fixture( - params=[ - "object", - "string", - pytest.param( - "arrow_string", marks=td.skip_if_no("pyarrow", min_version="1.0.0") - ), - ] -) -def any_string_dtype(request): - """ - Parametrized fixture for string dtypes. - * 'object' - * 'string' - * 'arrow_string' - """ - from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401 - - return request.param
- [x] closes #35977, closes #41333 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry 2 separate issues at play here - regex was ignored in #41333 (specific to the `replace_list` path) but then if `replace_regex` was hit with anything but an object type that would just raise.
https://api.github.com/repos/pandas-dev/pandas/pulls/41343
2021-05-06T02:46:02Z
2021-05-11T20:23:12Z
2021-05-11T20:23:12Z
2021-05-11T20:27:16Z
BUG/API: SeriesGroupBy reduction with numeric_only=True
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index d654cf5715bdf..a72cfce6e7477 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -1037,6 +1037,7 @@ Groupby/resample/rolling - Bug in :meth:`DataFrameGroupBy.__getitem__` with non-unique columns incorrectly returning a malformed :class:`SeriesGroupBy` instead of :class:`DataFrameGroupBy` (:issue:`41427`) - Bug in :meth:`DataFrameGroupBy.transform` with non-unique columns incorrectly raising ``AttributeError`` (:issue:`41427`) - Bug in :meth:`Resampler.apply` with non-unique columns incorrectly dropping duplicated columns (:issue:`41445`) +- Bug in :meth:`SeriesGroupBy` aggregations incorrectly returning empty :class:`Series` instead of raising ``TypeError`` on aggregations that are invalid for its dtype, e.g. ``.prod`` with ``datetime64[ns]`` dtype (:issue:`41342`) - Bug in :meth:`DataFrame.rolling.__iter__` where ``on`` was not assigned to the index of the resulting objects (:issue:`40373`) - Bug in :meth:`DataFrameGroupBy.transform` and :meth:`DataFrameGroupBy.agg` with ``engine="numba"`` where ``*args`` were being cached with the user passed function (:issue:`41647`) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index dec68ab8f392d..b51fb2234e148 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -323,7 +323,7 @@ def _aggregate_multiple_funcs(self, arg) -> DataFrame: return output def _cython_agg_general( - self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1 + self, how: str, alt: Callable, numeric_only: bool, min_count: int = -1 ): obj = self._selected_obj @@ -331,7 +331,10 @@ def _cython_agg_general( data = obj._mgr if numeric_only and not is_numeric_dtype(obj.dtype): - raise DataError("No numeric types to aggregate") + # GH#41291 match Series behavior + raise NotImplementedError( + f"{type(self).__name__}.{how} does not implement numeric_only." 
+ ) # This is overkill because it is only called once, but is here to # mirror the array_func used in DataFrameGroupBy._cython_agg_general @@ -1056,7 +1059,7 @@ def _iterate_slices(self) -> Iterable[Series]: yield values def _cython_agg_general( - self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1 + self, how: str, alt: Callable, numeric_only: bool, min_count: int = -1 ) -> DataFrame: # Note: we never get here with how="ohlc"; that goes through SeriesGroupBy diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 7c8d83f83e20f..b00a1160fb01b 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1101,6 +1101,34 @@ def _wrap_transformed_output(self, output: Mapping[base.OutputKey, ArrayLike]): def _wrap_applied_output(self, data, keys, values, not_indexed_same: bool = False): raise AbstractMethodError(self) + def _resolve_numeric_only(self, numeric_only: bool | lib.NoDefault) -> bool: + """ + Determine subclass-specific default value for 'numeric_only'. + + For SeriesGroupBy we want the default to be False (to match Series behavior). + For DataFrameGroupBy we want it to be True (for backwards-compat). + + Parameters + ---------- + numeric_only : bool or lib.no_default + + Returns + ------- + bool + """ + # GH#41291 + if numeric_only is lib.no_default: + # i.e. not explicitly passed by user + if self.obj.ndim == 2: + # i.e. 
DataFrameGroupBy + numeric_only = True + else: + numeric_only = False + + # error: Incompatible return value type (got "Union[bool, NoDefault]", + # expected "bool") + return numeric_only # type: ignore[return-value] + # ----------------------------------------------------------------- # numba @@ -1308,6 +1336,7 @@ def _agg_general( alias: str, npfunc: Callable, ): + with group_selection_context(self): # try a cython aggregation if we can result = None @@ -1367,7 +1396,7 @@ def _agg_py_fallback( return ensure_block_shape(res_values, ndim=ndim) def _cython_agg_general( - self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1 + self, how: str, alt: Callable, numeric_only: bool, min_count: int = -1 ): raise AbstractMethodError(self) @@ -1587,7 +1616,7 @@ def count(self): @final @Substitution(name="groupby") @Substitution(see_also=_common_see_also) - def mean(self, numeric_only: bool = True): + def mean(self, numeric_only: bool | lib.NoDefault = lib.no_default): """ Compute mean of groups, excluding missing values. @@ -1635,6 +1664,8 @@ def mean(self, numeric_only: bool = True): 2 4.0 Name: B, dtype: float64 """ + numeric_only = self._resolve_numeric_only(numeric_only) + result = self._cython_agg_general( "mean", alt=lambda x: Series(x).mean(numeric_only=numeric_only), @@ -1645,7 +1676,7 @@ def mean(self, numeric_only: bool = True): @final @Substitution(name="groupby") @Appender(_common_see_also) - def median(self, numeric_only=True): + def median(self, numeric_only: bool | lib.NoDefault = lib.no_default): """ Compute median of groups, excluding missing values. @@ -1662,6 +1693,8 @@ def median(self, numeric_only=True): Series or DataFrame Median of values within each group. """ + numeric_only = self._resolve_numeric_only(numeric_only) + result = self._cython_agg_general( "median", alt=lambda x: Series(x).median(numeric_only=numeric_only), @@ -1719,8 +1752,9 @@ def var(self, ddof: int = 1): Variance of values within each group. 
""" if ddof == 1: + numeric_only = self._resolve_numeric_only(lib.no_default) return self._cython_agg_general( - "var", alt=lambda x: Series(x).var(ddof=ddof) + "var", alt=lambda x: Series(x).var(ddof=ddof), numeric_only=numeric_only ) else: func = lambda x: x.var(ddof=ddof) @@ -1785,7 +1819,10 @@ def size(self) -> FrameOrSeriesUnion: @final @doc(_groupby_agg_method_template, fname="sum", no=True, mc=0) - def sum(self, numeric_only: bool = True, min_count: int = 0): + def sum( + self, numeric_only: bool | lib.NoDefault = lib.no_default, min_count: int = 0 + ): + numeric_only = self._resolve_numeric_only(numeric_only) # If we are grouping on categoricals we want unobserved categories to # return zero, rather than the default of NaN which the reindexing in @@ -1802,7 +1839,11 @@ def sum(self, numeric_only: bool = True, min_count: int = 0): @final @doc(_groupby_agg_method_template, fname="prod", no=True, mc=0) - def prod(self, numeric_only: bool = True, min_count: int = 0): + def prod( + self, numeric_only: bool | lib.NoDefault = lib.no_default, min_count: int = 0 + ): + numeric_only = self._resolve_numeric_only(numeric_only) + return self._agg_general( numeric_only=numeric_only, min_count=min_count, alias="prod", npfunc=np.prod ) @@ -2731,7 +2772,7 @@ def _get_cythonized_result( how: str, cython_dtype: np.dtype, aggregate: bool = False, - numeric_only: bool = True, + numeric_only: bool | lib.NoDefault = lib.no_default, needs_counts: bool = False, needs_values: bool = False, needs_2d: bool = False, @@ -2799,6 +2840,8 @@ def _get_cythonized_result( ------- `Series` or `DataFrame` with filled values """ + numeric_only = self._resolve_numeric_only(numeric_only) + if result_is_index and aggregate: raise ValueError("'result_is_index' and 'aggregate' cannot both be True!") if post_processing and not callable(post_processing): diff --git a/pandas/tests/groupby/aggregate/test_cython.py b/pandas/tests/groupby/aggregate/test_cython.py index 4a8aabe41b754..cf1177d231e37 100644 
--- a/pandas/tests/groupby/aggregate/test_cython.py +++ b/pandas/tests/groupby/aggregate/test_cython.py @@ -89,12 +89,16 @@ def test_cython_agg_boolean(): def test_cython_agg_nothing_to_agg(): frame = DataFrame({"a": np.random.randint(0, 5, 50), "b": ["foo", "bar"] * 25}) - msg = "No numeric types to aggregate" - with pytest.raises(DataError, match=msg): + with pytest.raises(NotImplementedError, match="does not implement"): + frame.groupby("a")["b"].mean(numeric_only=True) + + with pytest.raises(TypeError, match="Could not convert (foo|bar)*"): frame.groupby("a")["b"].mean() frame = DataFrame({"a": np.random.randint(0, 5, 50), "b": ["foo", "bar"] * 25}) + + msg = "No numeric types to aggregate" with pytest.raises(DataError, match=msg): frame[["b"]].groupby(frame["a"]).mean() @@ -107,9 +111,8 @@ def test_cython_agg_nothing_to_agg_with_dates(): "dates": pd.date_range("now", periods=50, freq="T"), } ) - msg = "No numeric types to aggregate" - with pytest.raises(DataError, match=msg): - frame.groupby("b").dates.mean() + with pytest.raises(NotImplementedError, match="does not implement"): + frame.groupby("b").dates.mean(numeric_only=True) def test_cython_agg_frame_columns(): @@ -170,7 +173,7 @@ def test__cython_agg_general(op, targop): df = DataFrame(np.random.randn(1000)) labels = np.random.randint(0, 50, size=1000).astype(float) - result = df.groupby(labels)._cython_agg_general(op) + result = df.groupby(labels)._cython_agg_general(op, alt=None, numeric_only=True) expected = df.groupby(labels).agg(targop) tm.assert_frame_equal(result, expected) @@ -192,7 +195,7 @@ def test_cython_agg_empty_buckets(op, targop, observed): # calling _cython_agg_general directly, instead of via the user API # which sets different values for min_count, so do that here. 
g = df.groupby(pd.cut(df[0], grps), observed=observed) - result = g._cython_agg_general(op) + result = g._cython_agg_general(op, alt=None, numeric_only=True) g = df.groupby(pd.cut(df[0], grps), observed=observed) expected = g.agg(lambda x: targop(x)) @@ -206,7 +209,7 @@ def test_cython_agg_empty_buckets_nanops(observed): grps = range(0, 25, 5) # add / sum result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general( - "add" + "add", alt=None, numeric_only=True ) intervals = pd.interval_range(0, 20, freq=5) expected = DataFrame( @@ -220,7 +223,7 @@ def test_cython_agg_empty_buckets_nanops(observed): # prod result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general( - "prod" + "prod", alt=None, numeric_only=True ) expected = DataFrame( {"a": [1, 1, 1716, 1]}, diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 4d8ae8d269eb6..70bdfe92602b2 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1764,7 +1764,15 @@ def test_empty_groupby(columns, keys, values, method, op, request): # GH8093 & GH26411 override_dtype = None - if isinstance(values, Categorical) and len(keys) == 1 and method == "apply": + if ( + isinstance(values, Categorical) + and not isinstance(columns, list) + and op in ["sum", "prod"] + and method != "apply" + ): + # handled below GH#41291 + pass + elif isinstance(values, Categorical) and len(keys) == 1 and method == "apply": mark = pytest.mark.xfail(raises=TypeError, match="'str' object is not callable") request.node.add_marker(mark) elif ( @@ -1825,11 +1833,36 @@ def test_empty_groupby(columns, keys, values, method, op, request): df = df.iloc[:0] gb = df.groupby(keys)[columns] - if method == "attr": - result = getattr(gb, op)() - else: - result = getattr(gb, method)(op) + def get_result(): + if method == "attr": + return getattr(gb, op)() + else: + return getattr(gb, method)(op) + + if columns == "C": + # i.e. 
SeriesGroupBy + if op in ["prod", "sum"]: + # ops that require more than just ordered-ness + if method != "apply": + # FIXME: apply goes through different code path + if df.dtypes[0].kind == "M": + # GH#41291 + # datetime64 -> prod and sum are invalid + msg = "datetime64 type does not support" + with pytest.raises(TypeError, match=msg): + get_result() + + return + elif isinstance(values, Categorical): + # GH#41291 + msg = "category type does not support" + with pytest.raises(TypeError, match=msg): + get_result() + + return + + result = get_result() expected = df.set_index(keys)[columns] if override_dtype is not None: expected = expected.astype(override_dtype) diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index 296182a6bdbea..abe834b9fff17 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -61,7 +61,7 @@ def test_custom_grouper(index): g.ohlc() # doesn't use _cython_agg_general funcs = ["add", "mean", "prod", "min", "max", "var"] for f in funcs: - g._cython_agg_general(f) + g._cython_agg_general(f, alt=None, numeric_only=True) b = Grouper(freq=Minute(5), closed="right", label="right") g = s.groupby(b) @@ -69,7 +69,7 @@ def test_custom_grouper(index): g.ohlc() # doesn't use _cython_agg_general funcs = ["add", "mean", "prod", "min", "max", "var"] for f in funcs: - g._cython_agg_general(f) + g._cython_agg_general(f, alt=None, numeric_only=True) assert g.ngroups == 2593 assert notna(g.mean()).all() @@ -417,7 +417,7 @@ def test_resample_frame_basic(): # check all cython functions work funcs = ["add", "mean", "prod", "min", "max", "var"] for f in funcs: - g._cython_agg_general(f) + g._cython_agg_general(f, alt=None, numeric_only=True) result = df.resample("A").mean() tm.assert_series_equal(result["A"], df["A"].resample("A").mean())
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry This addresses the SeriesGroupBy (easier) half of #41291: 1) When we have numeric_only=True and non-numeric data in a SeriesGroupBy reduction, raise NotImplementedError (matching Series behavior) instead of falling back to an often-wrong fallback (xref #41341) 2) When the user doesn't explicitly pass `numeric_only=True`, change the default to False for SeriesGroupBy, leaving DataFrameGroupBy unaffected.
https://api.github.com/repos/pandas-dev/pandas/pulls/41342
2021-05-05T22:18:43Z
2021-05-26T10:16:16Z
2021-05-26T10:16:16Z
2021-05-26T17:15:55Z
TST: xfail incorrect test_empty_groupby
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index abfa2a23a4402..f716a3a44cd54 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1724,27 +1724,82 @@ def test_pivot_table_values_key_error(): [0], [0.0], ["a"], - [Categorical([0])], + Categorical([0]), [to_datetime(0)], - [date_range(0, 1, 1, tz="US/Eastern")], - [pd.array([0], dtype="Int64")], - [pd.array([0], dtype="Float64")], - [pd.array([False], dtype="boolean")], + date_range(0, 1, 1, tz="US/Eastern"), + pd.array([0], dtype="Int64"), + pd.array([0], dtype="Float64"), + pd.array([False], dtype="boolean"), ], ) @pytest.mark.parametrize("method", ["attr", "agg", "apply"]) @pytest.mark.parametrize( "op", ["idxmax", "idxmin", "mad", "min", "max", "sum", "prod", "skew"] ) -def test_empty_groupby(columns, keys, values, method, op): +def test_empty_groupby(columns, keys, values, method, op, request): # GH8093 & GH26411 + if isinstance(values, Categorical) and len(keys) == 1 and method == "apply": + mark = pytest.mark.xfail(raises=TypeError, match="'str' object is not callable") + request.node.add_marker(mark) + elif ( + isinstance(values, Categorical) + and len(keys) == 1 + and op in ["idxmax", "idxmin"] + ): + mark = pytest.mark.xfail( + raises=ValueError, match="attempt to get arg(min|max) of an empty sequence" + ) + request.node.add_marker(mark) + elif ( + isinstance(values, Categorical) + and len(keys) == 1 + and not isinstance(columns, list) + ): + mark = pytest.mark.xfail( + raises=TypeError, match="'Categorical' does not implement" + ) + request.node.add_marker(mark) + elif ( + isinstance(values, Categorical) + and len(keys) == 1 + and op in ["mad", "min", "max", "sum", "prod", "skew"] + ): + mark = pytest.mark.xfail( + raises=AssertionError, match="(DataFrame|Series) are different" + ) + request.node.add_marker(mark) + elif ( + isinstance(values, Categorical) + and len(keys) == 2 + and op in ["min", "max", "sum"] + and 
method != "apply" + ): + mark = pytest.mark.xfail( + raises=AssertionError, match="(DataFrame|Series) are different" + ) + request.node.add_marker(mark) + elif ( + isinstance(values, pd.core.arrays.BooleanArray) + and op in ["sum", "prod"] + and method != "apply" + ): + mark = pytest.mark.xfail( + raises=AssertionError, match="(DataFrame|Series) are different" + ) + request.node.add_marker(mark) + override_dtype = None if isinstance(values[0], bool) and op in ("prod", "sum") and method != "apply": # sum/product of bools is an integer override_dtype = "int64" - df = DataFrame([3 * values], columns=list("ABC")) + df = DataFrame({"A": values, "B": values, "C": values}, columns=list("ABC")) + + if hasattr(values, "dtype"): + # check that we did the construction right + assert (df.dtypes == values.dtype).all() + df = df.iloc[:0] gb = df.groupby(keys)[columns]
In addressing #41291 i found that this test isn't constructing the DataFrame (i think) it intends to. When the construction is fixed, a bunch of the tests fail. This fixes the construction and xfails those tests.
https://api.github.com/repos/pandas-dev/pandas/pulls/41341
2021-05-05T21:40:16Z
2021-05-06T13:50:05Z
2021-05-06T13:50:05Z
2021-05-06T15:08:04Z