title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
CLN: assorted follow-ups
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index b188602a382f5..11c419c399877 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -417,7 +417,7 @@ Dependency Minimum Version optional_extra Notes PyTables 3.6.1 hdf5 HDF5-based reading / writing blosc 1.21.0 hdf5 Compression for HDF5 zlib hdf5 Compression for HDF5 -fastparquet 0.4.0 - Parquet reading / writing (pyarrow is default) +fastparquet 0.6.3 - Parquet reading / writing (pyarrow is default) pyarrow 6.0.0 parquet, feather Parquet, ORC, and feather reading / writing pyreadstat 1.1.2 spss SPSS files (.sav) reading odfpy 1.4.1 excel Open document format (.odf, .ods, .odt) reading / writing diff --git a/pandas/core/dtypes/astype.py b/pandas/core/dtypes/astype.py index 0307cefe2e777..4dd49ec6b64bb 100644 --- a/pandas/core/dtypes/astype.py +++ b/pandas/core/dtypes/astype.py @@ -215,6 +215,7 @@ def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> Arra # Series.astype behavior pre-2.0 did # values.tz_localize("UTC").tz_convert(dtype.tz) # which did not match the DTA/DTI behavior. + # We special-case here to give a Series-specific exception message. raise TypeError( "Cannot use .astype to convert from timezone-naive dtype to " "timezone-aware dtype. Use ser.dt.tz_localize instead." diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index a75448347233c..13802581c92ad 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1414,7 +1414,7 @@ def _ensure_nanosecond_dtype(dtype: DtypeObj) -> DtypeObj: # TODO: other value-dependent functions to standardize here include -# dtypes.concat.cast_to_common_type and Index._find_common_type_compat +# Index._find_common_type_compat def find_result_type(left: ArrayLike, right: Any) -> DtypeObj: """ Find the type/dtype for a the result of an operation between these objects. 
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 5b11945a8589e..91d5ac865b6b6 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -3,19 +3,12 @@ """ from __future__ import annotations -from typing import ( - TYPE_CHECKING, - cast, -) +from typing import TYPE_CHECKING import warnings import numpy as np -from pandas._typing import ( - ArrayLike, - AxisInt, - DtypeObj, -) +from pandas._typing import AxisInt from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.astype import astype_array @@ -23,10 +16,7 @@ common_dtype_categorical_compat, find_common_type, ) -from pandas.core.dtypes.common import ( - is_dtype_equal, - is_sparse, -) +from pandas.core.dtypes.common import is_dtype_equal from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, @@ -39,34 +29,6 @@ if TYPE_CHECKING: from pandas.core.arrays import Categorical - from pandas.core.arrays.sparse import SparseArray - - -def cast_to_common_type(arr: ArrayLike, dtype: DtypeObj) -> ArrayLike: - """ - Helper function for `arr.astype(common_dtype)` but handling all special - cases. - """ - if is_dtype_equal(arr.dtype, dtype): - return arr - - if is_sparse(arr) and not is_sparse(dtype): - # TODO(2.0): remove special case once SparseArray.astype deprecation - # is enforced. 
- # problem case: SparseArray.astype(dtype) doesn't follow the specified - # dtype exactly, but converts this to Sparse[dtype] -> first manually - # convert to dense array - - # error: Argument 1 to "astype" of "_ArrayOrScalarCommon" has incompatible type - # "Union[dtype[Any], ExtensionDtype]"; expected "Union[dtype[Any], None, type, _ - # SupportsDType[dtype[Any]], str, Union[Tuple[Any, int], Tuple[Any, - # Union[SupportsIndex, Sequence[SupportsIndex]]], List[Any], _DTypeDict, - # Tuple[Any, Any]]]" [arg-type] - arr = cast("SparseArray", arr) - return arr.to_dense().astype(dtype, copy=False) # type: ignore[arg-type] - - # astype_array includes ensure_wrapped_if_datetimelike - return astype_array(arr, dtype=dtype, copy=False) def concat_compat(to_concat, axis: AxisInt = 0, ea_compat_axis: bool = False): @@ -126,7 +88,9 @@ def is_nonempty(x) -> bool: if not single_dtype: target_dtype = find_common_type([x.dtype for x in to_concat]) target_dtype = common_dtype_categorical_compat(to_concat, target_dtype) - to_concat = [cast_to_common_type(arr, target_dtype) for arr in to_concat] + to_concat = [ + astype_array(arr, target_dtype, copy=False) for arr in to_concat + ] if isinstance(to_concat[0], ABCExtensionArray): # TODO: what about EA-backed Index? 
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 58859054943b3..2965baf837419 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5046,7 +5046,6 @@ def align( broadcast_axis=broadcast_axis, ) - # error: Signature of "set_axis" incompatible with supertype "NDFrame" @Appender( """ Examples diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 5fb82f1f9b72d..f06d118538c1a 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -4,7 +4,6 @@ Any, Hashable, ) -import warnings import numpy as np @@ -18,7 +17,6 @@ cache_readonly, doc, ) -from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_categorical_dtype, diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 5cde5dbda7ae0..4556882b5280b 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -14,7 +14,6 @@ Sequence, Tuple, cast, - overload, ) import warnings @@ -3739,28 +3738,9 @@ def isin(self, values, level=None) -> npt.NDArray[np.bool_]: return np.zeros(len(levs), dtype=np.bool_) return levs.isin(values) - @overload - def set_names( - self, names, *, level=..., inplace: Literal[False] = ... - ) -> MultiIndex: - ... - - @overload - def set_names(self, names, *, level=..., inplace: Literal[True]) -> None: - ... - - @overload - def set_names(self, names, *, level=..., inplace: bool = ...) -> MultiIndex | None: - ... 
- - def set_names( - self, names, *, level=None, inplace: bool = False - ) -> MultiIndex | None: - return super().set_names(names=names, level=level, inplace=inplace) - # error: Incompatible types in assignment (expression has type overloaded function, # base class "Index" defined the type as "Callable[[Index, Any, bool], Any]") - rename = set_names # type: ignore[assignment] + rename = Index.set_names # type: ignore[assignment] # --------------------------------------------------------------- # Arithmetic/Numeric Methods - Disabled diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index c8ad7dd328edf..0592db8ad608d 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -24,6 +24,7 @@ ) from pandas.util._decorators import cache_readonly +from pandas.core.dtypes.astype import astype_array from pandas.core.dtypes.cast import ( ensure_dtype_can_hold_na, find_common_type, @@ -34,10 +35,7 @@ is_scalar, needs_i8_conversion, ) -from pandas.core.dtypes.concat import ( - cast_to_common_type, - concat_compat, -) +from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, @@ -153,7 +151,7 @@ def concat_arrays(to_concat: list) -> ArrayLike: to_concat = [ arr.to_array(target_dtype) if isinstance(arr, NullArrayProxy) - else cast_to_common_type(arr, target_dtype) + else astype_array(arr, target_dtype, copy=False) for arr in to_concat ] diff --git a/pandas/core/series.py b/pandas/core/series.py index 9f05eba00b05c..bba225bb91caf 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4925,7 +4925,6 @@ def rename( else: return self._set_name(index, inplace=inplace) - # error: Signature of "set_axis" incompatible with supertype "NDFrame" @Appender( """ Examples diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 58eef2a39b37a..b28a9def8a7ea 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -11,7 +11,6 @@ Sequence, 
cast, ) -import warnings import numpy as np @@ -341,14 +340,7 @@ def lexsort_indexer( keys = [ensure_key_mapped(k, key) for k in keys] for k, order in zip(keys, orders): - with warnings.catch_warnings(): - # TODO(2.0): unnecessary once deprecation is enforced - # GH#45618 don't issue warning user can't do anything about - warnings.filterwarnings( - "ignore", ".*(SparseArray|SparseDtype).*", category=FutureWarning - ) - - cat = Categorical(k, ordered=True) + cat = Categorical(k, ordered=True) if na_position not in ["last", "first"]: raise ValueError(f"invalid na_position: {na_position}") diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 841def76a156f..03793055c1e85 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -3604,15 +3604,11 @@ def _background_gradient( rng = smax - smin # extend lower / upper bounds, compresses color range norm = mpl.colors.Normalize(smin - (rng * low), smax + (rng * high)) - from pandas.plotting._matplotlib.compat import mpl_ge_3_6_0 - if mpl_ge_3_6_0(): - if cmap is None: - rgbas = mpl.colormaps[mpl.rcParams["image.cmap"]](norm(gmap)) - else: - rgbas = mpl.colormaps.get_cmap(cmap)(norm(gmap)) + if cmap is None: + rgbas = mpl.colormaps[mpl.rcParams["image.cmap"]](norm(gmap)) else: - rgbas = plt.cm.get_cmap(cmap)(norm(gmap)) + rgbas = mpl.colormaps.get_cmap(cmap)(norm(gmap)) def relative_luminance(rgba) -> float: """ @@ -3891,10 +3887,8 @@ def css_calc(x, left: float, right: float, align: str, color: str | list | tuple if cmap is not None: # use the matplotlib colormap input with _mpl(Styler.bar) as (plt, mpl): - from pandas.plotting._matplotlib.compat import mpl_ge_3_6_0 - cmap = ( - (mpl.colormaps[cmap] if mpl_ge_3_6_0() else mpl.cm.get_cmap(cmap)) + mpl.colormaps[cmap] if isinstance(cmap, str) else cmap # assumed to be a Colormap instance as documented ) diff --git a/pandas/plotting/_matplotlib/compat.py b/pandas/plotting/_matplotlib/compat.py index 86b218db4ebe6..7314f05e9f19c 100644 --- 
a/pandas/plotting/_matplotlib/compat.py +++ b/pandas/plotting/_matplotlib/compat.py @@ -1,8 +1,6 @@ # being a bit too dynamic from __future__ import annotations -import operator - from pandas.util.version import Version @@ -15,8 +13,3 @@ def inner(): return op(Version(mpl.__version__), Version(version)) return inner - - -mpl_ge_3_4_0 = _mpl_version("3.4.0", operator.ge) -mpl_ge_3_5_0 = _mpl_version("3.5.0", operator.ge) -mpl_ge_3_6_0 = _mpl_version("3.6.0", operator.ge) diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 605cc1d94e0af..73388ee9755c5 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -55,7 +55,6 @@ from pandas.core.frame import DataFrame from pandas.io.formats.printing import pprint_thing -from pandas.plotting._matplotlib.compat import mpl_ge_3_6_0 from pandas.plotting._matplotlib.converter import register_pandas_matplotlib_converters from pandas.plotting._matplotlib.groupby import reconstruct_data_with_by from pandas.plotting._matplotlib.misc import unpack_single_str_list @@ -1229,19 +1228,13 @@ def _make_plot(self): c_values = c if self.colormap is not None: - if mpl_ge_3_6_0(): - cmap = mpl.colormaps.get_cmap(self.colormap) - else: - cmap = self.plt.cm.get_cmap(self.colormap) + cmap = mpl.colormaps.get_cmap(self.colormap) else: # cmap is only used if c_values are integers, otherwise UserWarning if is_integer_dtype(c_values): # pandas uses colormap, matplotlib uses cmap. cmap = "Greys" - if mpl_ge_3_6_0(): - cmap = mpl.colormaps[cmap] - else: - cmap = self.plt.cm.get_cmap(cmap) + cmap = mpl.colormaps[cmap] else: cmap = None @@ -1309,10 +1302,7 @@ def _make_plot(self) -> None: ax = self.axes[0] # pandas uses colormap, matplotlib uses cmap. 
cmap = self.colormap or "BuGn" - if mpl_ge_3_6_0(): - cmap = mpl.colormaps.get_cmap(cmap) - else: - cmap = self.plt.cm.get_cmap(cmap) + cmap = mpl.colormaps.get_cmap(cmap) cb = self.kwds.pop("colorbar", True) if C is None: diff --git a/pandas/plotting/_matplotlib/style.py b/pandas/plotting/_matplotlib/style.py index f060255c4e9c5..839da35a8ae83 100644 --- a/pandas/plotting/_matplotlib/style.py +++ b/pandas/plotting/_matplotlib/style.py @@ -10,7 +10,6 @@ import warnings import matplotlib as mpl -from matplotlib import cm import matplotlib.colors import numpy as np @@ -21,8 +20,6 @@ import pandas.core.common as com -from pandas.plotting._matplotlib.compat import mpl_ge_3_6_0 - if TYPE_CHECKING: from matplotlib.colors import Colormap @@ -153,10 +150,7 @@ def _get_cmap_instance(colormap: str | Colormap) -> Colormap: """Get instance of matplotlib colormap.""" if isinstance(colormap, str): cmap = colormap - if mpl_ge_3_6_0(): - colormap = mpl.colormaps[colormap] - else: - colormap = cm.get_cmap(colormap) + colormap = mpl.colormaps[colormap] if colormap is None: raise ValueError(f"Colormap {cmap} is not recognized") return colormap diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index 1749c05a555a1..eecfcbc72f489 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -22,8 +22,6 @@ ABCSeries, ) -from pandas.plotting._matplotlib import compat - if TYPE_CHECKING: from matplotlib.axes import Axes from matplotlib.axis import Axis @@ -396,10 +394,7 @@ def handle_shared_axes( row_num = lambda x: x.get_subplotspec().rowspan.start col_num = lambda x: x.get_subplotspec().colspan.start - if compat.mpl_ge_3_4_0(): - is_first_col = lambda x: x.get_subplotspec().is_first_col() - else: - is_first_col = lambda x: x.is_first_col() + is_first_col = lambda x: x.get_subplotspec().is_first_col() if nrows > 1: try: @@ -421,10 +416,7 @@ def handle_shared_axes( except IndexError: # if gridspec is used, ax.rowNum 
and ax.colNum may different # from layout shape. in this case, use last_row logic - if compat.mpl_ge_3_4_0(): - is_last_row = lambda x: x.get_subplotspec().is_last_row() - else: - is_last_row = lambda x: x.is_last_row() + is_last_row = lambda x: x.get_subplotspec().is_last_row() for ax in axarr: if is_last_row(ax): continue diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 95d4d6629f608..529dd6baa70c0 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -28,7 +28,6 @@ Int64Index, UInt64Index, ) -from pandas.core.arrays import TimedeltaArray from pandas.core.computation import expressions as expr from pandas.tests.arithmetic.common import ( assert_invalid_addsub_type, @@ -210,15 +209,10 @@ def test_numeric_arr_mul_tdscalar(self, scalar_td, numeric_idx, box_with_array): index = numeric_idx expected = TimedeltaIndex([Timedelta(days=n) for n in range(len(index))]) if isinstance(scalar_td, np.timedelta64): - # TODO(2.0): once TDA.astype converts to m8, just do expected.astype - tda = expected._data dtype = scalar_td.dtype - expected = type(tda)._simple_new(tda._ndarray.astype(dtype), dtype=dtype) + expected = expected.astype(dtype) elif type(scalar_td) is timedelta: - # TODO(2.0): once TDA.astype converts to m8, just do expected.astype - tda = expected._data - dtype = np.dtype("m8[us]") - expected = type(tda)._simple_new(tda._ndarray.astype(dtype), dtype=dtype) + expected = expected.astype("m8[us]") index = tm.box_expected(index, box) expected = tm.box_expected(expected, box) @@ -251,11 +245,7 @@ def test_numeric_arr_mul_tdscalar_numexpr_path( expected = arr_i8.view("timedelta64[D]").astype("timedelta64[ns]") if type(scalar_td) is timedelta: - # TODO(2.0): this shouldn't depend on 'box' expected = expected.astype("timedelta64[us]") - # TODO(2.0): won't be necessary to construct TimedeltaArray - # explicitly. 
- expected = TimedeltaArray._simple_new(expected, dtype=expected.dtype) expected = tm.box_expected(expected, box, transpose=False) @@ -272,18 +262,13 @@ def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box_with_array expected = TimedeltaIndex(["3 Days", "36 Hours"]) if isinstance(three_days, np.timedelta64): - # TODO(2.0): just use expected.astype - tda = expected._data dtype = three_days.dtype if dtype < np.dtype("m8[s]"): # i.e. resolution is lower -> use lowest supported resolution dtype = np.dtype("m8[s]") - expected = type(tda)._simple_new(tda._ndarray.astype(dtype), dtype=dtype) + expected = expected.astype(dtype) elif type(three_days) is timedelta: - # TODO(2.0): just use expected.astype - tda = expected._data - dtype = np.dtype("m8[us]") - expected = type(tda)._simple_new(tda._ndarray.astype(dtype), dtype=dtype) + expected = expected.astype("m8[us]") index = tm.box_expected(index, box) expected = tm.box_expected(expected, box) diff --git a/pandas/tests/frame/methods/test_astype.py b/pandas/tests/frame/methods/test_astype.py index 3f34f20e53cc1..96ef49acdcb21 100644 --- a/pandas/tests/frame/methods/test_astype.py +++ b/pandas/tests/frame/methods/test_astype.py @@ -420,11 +420,9 @@ def test_astype_to_datetime_unit(self, unit): if unit in ["ns", "us", "ms", "s"]: # GH#48928 - exp_dtype = dtype result = df.astype(dtype) else: # we use the nearest supported dtype (i.e. M8[s]) - exp_dtype = "M8[s]" msg = rf"Cannot cast DatetimeArray to dtype datetime64\[{unit}\]" with pytest.raises(TypeError, match=msg): df.astype(dtype) @@ -440,32 +438,24 @@ def test_astype_to_datetime_unit(self, unit): return - # TODO(2.0): once DataFrame constructor doesn't cast ndarray inputs. 
- # can simplify this - exp_values = arr.astype(exp_dtype) - exp_dta = pd.core.arrays.DatetimeArray._simple_new( - exp_values, dtype=exp_values.dtype - ) - exp_df = DataFrame(exp_dta) - assert (exp_df.dtypes == exp_dtype).all() - + exp_df = DataFrame(arr.astype(dtype)) + assert (exp_df.dtypes == dtype).all() tm.assert_frame_equal(result, exp_df) res_ser = ser.astype(dtype) exp_ser = exp_df.iloc[:, 0] - assert exp_ser.dtype == exp_dtype + assert exp_ser.dtype == dtype tm.assert_series_equal(res_ser, exp_ser) exp_dta = exp_ser._values res_index = idx.astype(dtype) - # TODO(2.0): should be able to just call pd.Index(exp_ser) - exp_index = pd.DatetimeIndex._simple_new(exp_dta, name=idx.name) - assert exp_index.dtype == exp_dtype + exp_index = pd.Index(exp_ser) + assert exp_index.dtype == dtype tm.assert_index_equal(res_index, exp_index) res_dta = dta.astype(dtype) - assert exp_dta.dtype == exp_dtype + assert exp_dta.dtype == dtype tm.assert_extension_array_equal(res_dta, exp_dta) @pytest.mark.parametrize("unit", ["ns"]) diff --git a/pandas/tests/groupby/test_allowlist.py b/pandas/tests/groupby/test_allowlist.py index 38de589d0c60c..034514cb0bcfb 100644 --- a/pandas/tests/groupby/test_allowlist.py +++ b/pandas/tests/groupby/test_allowlist.py @@ -74,6 +74,8 @@ def raw_frame(multiindex_dataframe_random_data): @pytest.mark.parametrize("axis", [0, 1]) @pytest.mark.parametrize("skipna", [True, False]) @pytest.mark.parametrize("sort", [True, False]) +@pytest.mark.filterwarnings("ignore:Using the level keyword:FutureWarning") +@pytest.mark.filterwarnings("ignore:The default value of numeric_only:FutureWarning") def test_regression_allowlist_methods(raw_frame, op, level, axis, skipna, sort): # GH6944 # GH 17537 diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index 7e4df5ae8699c..c5b135880ee8c 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -668,7 
+668,6 @@ def test_get_indexer_mixed_dtypes(self, target): ([date(9999, 1, 1), date(9999, 1, 1)], [-1, -1]), ], ) - @pytest.mark.filterwarnings("ignore:Comparison of Timestamp.*:FutureWarning") def test_get_indexer_out_of_bounds_date(self, target, positions): values = DatetimeIndex([Timestamp("2020-01-01"), Timestamp("2020-01-02")]) diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py index ff4b8564f86ca..4b7140b112bd9 100644 --- a/pandas/tests/indexes/timedeltas/test_indexing.py +++ b/pandas/tests/indexes/timedeltas/test_indexing.py @@ -82,14 +82,9 @@ def test_get_loc_key_unit_mismatch(self): assert loc == 1 def test_get_loc_key_unit_mismatch_not_castable(self): - # TODO(2.0): once TDA.astype supports m8[s] directly, tdi - # can be constructed directly - tda = to_timedelta(["0 days", "1 days", "2 days"])._data - arr = np.array(tda).astype("m8[s]") - tda2 = type(tda)._simple_new(arr, dtype=arr.dtype) - tdi = TimedeltaIndex(tda2) + tdi = to_timedelta(["0 days", "1 days", "2 days"]).astype("m8[s]") assert tdi.dtype == "m8[s]" - key = tda[0]._as_unit("ns") + Timedelta(1) + key = tdi[0]._as_unit("ns") + Timedelta(1) with pytest.raises(KeyError, match=r"Timedelta\('0 days 00:00:00.000000001'\)"): tdi.get_loc(key) diff --git a/pandas/tests/io/formats/style/test_matplotlib.py b/pandas/tests/io/formats/style/test_matplotlib.py index f0c4152e3339b..c19f27dc064d1 100644 --- a/pandas/tests/io/formats/style/test_matplotlib.py +++ b/pandas/tests/io/formats/style/test_matplotlib.py @@ -13,7 +13,6 @@ import matplotlib as mpl from pandas.io.formats.style import Styler -from pandas.plotting._matplotlib.compat import mpl_ge_3_6_0 @pytest.fixture @@ -263,7 +262,7 @@ def test_background_gradient_gmap_wrong_series(styler_blank): @pytest.mark.parametrize( "cmap", - ["PuBu", mpl.colormaps["PuBu"] if mpl_ge_3_6_0() else mpl.cm.get_cmap("PuBu")], + ["PuBu", mpl.colormaps["PuBu"]], ) def test_bar_colormap(cmap): data = 
DataFrame([[1, 2], [3, 4]]) diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py index 6567504894236..20de38ebf6665 100644 --- a/pandas/tests/plotting/common.py +++ b/pandas/tests/plotting/common.py @@ -510,18 +510,10 @@ def _unpack_cycler(self, rcParams, field="color"): return [v[field] for v in rcParams["axes.prop_cycle"]] def get_x_axis(self, ax): - from pandas.plotting._matplotlib.compat import mpl_ge_3_5_0 - - if mpl_ge_3_5_0(): - return ax._shared_axes["x"] - return ax._shared_x_axes + return ax._shared_axes["x"] def get_y_axis(self, ax): - from pandas.plotting._matplotlib.compat import mpl_ge_3_5_0 - - if mpl_ge_3_5_0(): - return ax._shared_axes["y"] - return ax._shared_y_axes + return ax._shared_axes["y"] def _check_plot_works(f, filterwarnings="always", default_axes=False, **kwargs): diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py index 476f0a89980ea..73b723ba7f597 100644 --- a/pandas/tests/plotting/frame/test_frame.py +++ b/pandas/tests/plotting/frame/test_frame.py @@ -32,15 +32,10 @@ from pandas.io.formats.printing import pprint_thing -try: - from pandas.plotting._matplotlib.compat import mpl_ge_3_6_0 -except ImportError: - mpl_ge_3_6_0 = lambda: True - @td.skip_if_no_mpl class TestDataFramePlots(TestPlotBase): - @pytest.mark.xfail(mpl_ge_3_6_0(), reason="Api changed") + @pytest.mark.xfail(reason="Api changed in 3.6.0") @pytest.mark.slow def test_plot(self): df = tm.makeTimeDataFrame() @@ -735,7 +730,6 @@ def test_plot_scatter_with_categorical_data(self, x, y): _check_plot_works(df.plot.scatter, x=x, y=y) def test_plot_scatter_with_c(self): - from pandas.plotting._matplotlib.compat import mpl_ge_3_4_0 df = DataFrame( np.random.randint(low=0, high=100, size=(6, 4)), @@ -748,10 +742,7 @@ def test_plot_scatter_with_c(self): # default to Greys assert ax.collections[0].cmap.name == "Greys" - if mpl_ge_3_4_0(): - assert ax.collections[0].colorbar.ax.get_ylabel() == "z" - else: - 
assert ax.collections[0].colorbar._label == "z" + assert ax.collections[0].colorbar.ax.get_ylabel() == "z" cm = "cubehelix" ax = df.plot.scatter(x="x", y="y", c="z", colormap=cm) diff --git a/pandas/tests/plotting/frame/test_frame_color.py b/pandas/tests/plotting/frame/test_frame_color.py index 2e860c2615322..ed129d315a0c6 100644 --- a/pandas/tests/plotting/frame/test_frame_color.py +++ b/pandas/tests/plotting/frame/test_frame_color.py @@ -199,14 +199,13 @@ def test_if_scatterplot_colorbars_are_next_to_parent_axes(self): @pytest.mark.parametrize("kw", ["c", "color"]) def test_scatter_with_c_column_name_with_colors(self, cmap, kw): # https://github.com/pandas-dev/pandas/issues/34316 - from pandas.plotting._matplotlib.compat import mpl_ge_3_6_0 df = DataFrame( [[5.1, 3.5], [4.9, 3.0], [7.0, 3.2], [6.4, 3.2], [5.9, 3.0]], columns=["length", "width"], ) df["species"] = ["r", "r", "g", "g", "b"] - if mpl_ge_3_6_0() and cmap is not None: + if cmap is not None: with tm.assert_produces_warning(UserWarning, check_stacklevel=False): ax = df.plot.scatter(x=0, y=1, cmap=cmap, **{kw: "species"}) else: diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index f75e5cd3491a4..3149fa9cb2095 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -39,11 +39,6 @@ from pandas.core.indexes.timedeltas import timedelta_range from pandas.tests.plotting.common import TestPlotBase -try: - from pandas.plotting._matplotlib.compat import mpl_ge_3_6_0 -except ImportError: - mpl_ge_3_6_0 = lambda: True - from pandas.tseries.offsets import WeekOfMonth @@ -265,7 +260,7 @@ def test_plot_multiple_inferred_freq(self): ser = Series(np.random.randn(len(dr)), index=dr) _check_plot_works(ser.plot) - @pytest.mark.xfail(mpl_ge_3_6_0(), reason="Api changed") + @pytest.mark.xfail(reason="Api changed in 3.6.0") def test_uhf(self): import pandas.plotting._matplotlib.converter as conv @@ -1215,7 +1210,7 @@ def 
test_secondary_legend(self): # TODO: color cycle problems assert len(colors) == 4 - @pytest.mark.xfail(mpl_ge_3_6_0(), reason="Api changed") + @pytest.mark.xfail(reason="Api changed in 3.6.0") def test_format_date_axis(self): rng = date_range("1/1/2012", periods=12, freq="M") df = DataFrame(np.random.randn(len(rng), 3), rng) diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py index dc586d15ba115..9d90f2e405803 100644 --- a/pandas/tests/plotting/test_hist_method.py +++ b/pandas/tests/plotting/test_hist_method.py @@ -18,11 +18,6 @@ _check_plot_works, ) -try: - from pandas.plotting._matplotlib.compat import mpl_ge_3_6_0 -except ImportError: - mpl_ge_3_6_0 = lambda: True - @pytest.fixture def ts(): @@ -196,7 +191,7 @@ def test_hist_kwargs(self, ts): ax = ts.plot.hist(align="left", stacked=True, ax=ax) tm.close() - @pytest.mark.xfail(mpl_ge_3_6_0(), reason="Api changed") + @pytest.mark.xfail(reason="Api changed in 3.6.0") @td.skip_if_no_scipy def test_hist_kde(self, ts): diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index d8004ad563196..d9505b4d593e6 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -20,11 +20,6 @@ _check_plot_works, ) -try: - from pandas.plotting._matplotlib.compat import mpl_ge_3_6_0 -except ImportError: - mpl_ge_3_6_0 = lambda: True - @pytest.fixture def ts(): @@ -497,7 +492,7 @@ def test_kde_missing_vals(self): # gh-14821: check if the values have any missing values assert any(~np.isnan(axes.lines[0].get_xdata())) - @pytest.mark.xfail(mpl_ge_3_6_0(), reason="Api changed") + @pytest.mark.xfail(reason="Api changed in 3.6.0") def test_boxplot_series(self, ts): _, ax = self.plt.subplots() ax = ts.plot.box(logy=True, ax=ax)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49489
2022-11-02T23:32:52Z
2022-11-04T16:56:55Z
2022-11-04T16:56:54Z
2022-11-04T17:13:05Z
CI: Remove GHA running on 1.4.x branch
diff --git a/.github/workflows/32-bit-linux.yml b/.github/workflows/32-bit-linux.yml index 8c9f0b594f321..cf8a0fe0da91c 100644 --- a/.github/workflows/32-bit-linux.yml +++ b/.github/workflows/32-bit-linux.yml @@ -5,12 +5,10 @@ on: branches: - main - 1.5.x - - 1.4.x pull_request: branches: - main - 1.5.x - - 1.4.x paths-ignore: - "doc/**" diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml index 6aff77c708378..738263369409c 100644 --- a/.github/workflows/code-checks.yml +++ b/.github/workflows/code-checks.yml @@ -5,12 +5,10 @@ on: branches: - main - 1.5.x - - 1.4.x pull_request: branches: - main - 1.5.x - - 1.4.x env: ENV_FILE: environment.yml diff --git a/.github/workflows/docbuild-and-upload.yml b/.github/workflows/docbuild-and-upload.yml index cfb4966847721..48a08d4febbaf 100644 --- a/.github/workflows/docbuild-and-upload.yml +++ b/.github/workflows/docbuild-and-upload.yml @@ -5,14 +5,12 @@ on: branches: - main - 1.5.x - - 1.4.x tags: - '*' pull_request: branches: - main - 1.5.x - - 1.4.x env: ENV_FILE: environment.yml diff --git a/.github/workflows/macos-windows.yml b/.github/workflows/macos-windows.yml index 63c065fa3d701..81c798c9d86c3 100644 --- a/.github/workflows/macos-windows.yml +++ b/.github/workflows/macos-windows.yml @@ -5,12 +5,10 @@ on: branches: - main - 1.5.x - - 1.4.x pull_request: branches: - main - 1.5.x - - 1.4.x paths-ignore: - "doc/**" diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml index b725f6812bc3b..7c4b36dab109d 100644 --- a/.github/workflows/python-dev.yml +++ b/.github/workflows/python-dev.yml @@ -25,12 +25,10 @@ on: branches: - main - 1.5.x - - 1.4.x pull_request: branches: - main - 1.5.x - - 1.4.x paths-ignore: - "doc/**" diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml index 7c20545105009..9957fc72e9f51 100644 --- a/.github/workflows/sdist.yml +++ b/.github/workflows/sdist.yml @@ -5,12 +5,10 @@ on: branches: - main - 1.5.x - - 1.4.x pull_request: 
branches: - main - 1.5.x - - 1.4.x types: [labeled, opened, synchronize, reopened] paths-ignore: - "doc/**" diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 072e2523c9727..69bde1d812e07 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -5,12 +5,10 @@ on: branches: - main - 1.5.x - - 1.4.x pull_request: branches: - main - 1.5.x - - 1.4.x paths-ignore: - "doc/**"
null
https://api.github.com/repos/pandas-dev/pandas/pulls/49488
2022-11-02T23:13:56Z
2022-11-03T08:25:19Z
2022-11-03T08:25:19Z
2022-11-03T16:00:15Z
DEPR: maybe_promote with pydate
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index c76555f9ef417..5c9b911ee0222 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -423,6 +423,7 @@ Removal of prior version deprecations/changes - Changed behavior of empty data passed into :class:`Series`; the default dtype will be ``object`` instead of ``float64`` (:issue:`29405`) - Changed the behavior of :meth:`DatetimeIndex.union`, :meth:`DatetimeIndex.intersection`, and :meth:`DatetimeIndex.symmetric_difference` with mismatched timezones to convert to UTC instead of casting to object dtype (:issue:`39328`) - Changed the behavior of :func:`to_datetime` with argument "now" with ``utc=False`` to match ``Timestamp("now")`` (:issue:`18705`) +- Changed the behavior of :meth:`Index.reindex`, :meth:`Series.reindex`, and :meth:`DataFrame.reindex` with a ``datetime64`` dtype and a ``datetime.date`` object for ``fill_value``; these are no longer considered equivalent to ``datetime.datetime`` objects so the reindex casts to object dtype (:issue:`39767`) - Changed behavior of :meth:`SparseArray.astype` when given a dtype that is not explicitly ``SparseDtype``, cast to the exact requested dtype rather than silently using a ``SparseDtype`` instead (:issue:`34457`) - Changed behavior of :meth:`Index.ravel` to return a view on the original :class:`Index` instead of a ``np.ndarray`` (:issue:`36900`) - Changed behavior of :meth:`Index.to_frame` with explicit ``name=None`` to use ``None`` for the column name instead of the index's name or default ``0`` (:issue:`45523`) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 793f407b78714..1c7effa93f4ef 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -5,7 +5,6 @@ from __future__ import annotations from datetime import ( - date, datetime, timedelta, ) @@ -615,41 +614,14 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan): if inferred == dtype: return dtype, fv - # 
TODO(2.0): once this deprecation is enforced, this whole case - # becomes equivalent to: - # dta = DatetimeArray._from_sequence([], dtype="M8[ns]") - # try: - # fv = dta._validate_setitem_value(fill_value) - # return dta.dtype, fv - # except (ValueError, TypeError): - # return _dtype_obj, fill_value - if isinstance(fill_value, date) and not isinstance(fill_value, datetime): - # deprecate casting of date object to match infer_dtype_from_scalar - # and DatetimeArray._validate_setitem_value - try: - fv = Timestamp(fill_value).to_datetime64() - except OutOfBoundsDatetime: - pass - else: - warnings.warn( - "Using a `date` object for fill_value with `datetime64[ns]` " - "dtype is deprecated. In a future version, this will be cast " - "to object dtype. Pass `fill_value=Timestamp(date_obj)` instead.", - FutureWarning, - stacklevel=find_stack_level(), - ) - return dtype, fv - elif isinstance(fill_value, str): - try: - # explicitly wrap in str to convert np.str_ - fv = Timestamp(str(fill_value)) - except (ValueError, TypeError): - pass - else: - if isna(fv) or fv.tz is None: - return dtype, fv.asm8 + from pandas.core.arrays import DatetimeArray - return np.dtype("object"), fill_value + dta = DatetimeArray._from_sequence([], dtype="M8[ns]") + try: + fv = dta._validate_setitem_value(fill_value) + return dta.dtype, fv + except (ValueError, TypeError): + return _dtype_obj, fill_value elif issubclass(dtype.type, np.timedelta64): inferred, fv = infer_dtype_from_scalar(fill_value, pandas_dtype=True) diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py index bfecbbbfc0435..c54e99f2c2225 100644 --- a/pandas/tests/dtypes/cast/test_promote.py +++ b/pandas/tests/dtypes/cast/test_promote.py @@ -25,7 +25,6 @@ from pandas.core.dtypes.missing import isna import pandas as pd -import pandas._testing as tm @pytest.fixture( @@ -403,15 +402,12 @@ def test_maybe_promote_any_with_datetime64(any_numpy_dtype_reduced, fill_value): expected_dtype = 
np.dtype(object) exp_val_for_scalar = fill_value - warn = None - msg = "Using a `date` object for fill_value" if type(fill_value) is datetime.date and dtype.kind == "M": - # Casting date to dt64 is deprecated - warn = FutureWarning + # Casting date to dt64 is deprecated, in 2.0 enforced to cast to object + expected_dtype = np.dtype(object) + exp_val_for_scalar = fill_value - with tm.assert_produces_warning(warn, match=msg): - # stacklevel is chosen to make sense when called from higher-level functions - _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) + _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) @pytest.mark.parametrize( diff --git a/pandas/tests/frame/methods/test_reindex.py b/pandas/tests/frame/methods/test_reindex.py index a132519970721..b30bd69806a94 100644 --- a/pandas/tests/frame/methods/test_reindex.py +++ b/pandas/tests/frame/methods/test_reindex.py @@ -136,30 +136,32 @@ def test_reindex_copies(self): @td.skip_array_manager_not_yet_implemented def test_reindex_date_fill_value(self): - # passing date to dt64 is deprecated + # passing date to dt64 is deprecated; enforced in 2.0 to cast to object arr = date_range("2016-01-01", periods=6).values.reshape(3, 2) df = DataFrame(arr, columns=["A", "B"], index=range(3)) ts = df.iloc[0, 0] fv = ts.date() - with tm.assert_produces_warning(FutureWarning): - res = df.reindex(index=range(4), columns=["A", "B", "C"], fill_value=fv) + res = df.reindex(index=range(4), columns=["A", "B", "C"], fill_value=fv) expected = DataFrame( - {"A": df["A"].tolist() + [ts], "B": df["B"].tolist() + [ts], "C": [ts] * 4} + {"A": df["A"].tolist() + [fv], "B": df["B"].tolist() + [fv], "C": [fv] * 4}, + dtype=object, ) tm.assert_frame_equal(res, expected) # only reindexing rows - with tm.assert_produces_warning(FutureWarning): - res = df.reindex(index=range(4), fill_value=fv) + res = df.reindex(index=range(4), fill_value=fv) tm.assert_frame_equal(res, expected[["A", "B"]]) # same with a 
datetime-castable str res = df.reindex( index=range(4), columns=["A", "B", "C"], fill_value="2016-01-01" ) + expected = DataFrame( + {"A": df["A"].tolist() + [ts], "B": df["B"].tolist() + [ts], "C": [ts] * 4}, + ) tm.assert_frame_equal(res, expected) def test_reindex_with_multi_index(self):
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49487
2022-11-02T23:08:13Z
2022-11-07T18:13:55Z
2022-11-07T18:13:55Z
2022-11-07T18:15:14Z
DEPR: Change str.replace(regex) from True to False & single behavior
diff --git a/doc/source/user_guide/text.rst b/doc/source/user_guide/text.rst index d350351075cb6..b4ae1d27df2b5 100644 --- a/doc/source/user_guide/text.rst +++ b/doc/source/user_guide/text.rst @@ -267,14 +267,16 @@ i.e., from the end of the string to the beginning of the string: s3 s3.str.replace("^.a|dog", "XX-XX ", case=False, regex=True) -.. warning:: - Some caution must be taken when dealing with regular expressions! The current behavior - is to treat single character patterns as literal strings, even when ``regex`` is set - to ``True``. This behavior is deprecated and will be removed in a future version so - that the ``regex`` keyword is always respected. +.. versionchanged:: 2.0 + +Single character pattern with ``regex=True`` will also be treated as regular expressions: + +.. ipython:: python -.. versionchanged:: 1.2.0 + s4 = pd.Series(["a.b", ".", "b", np.nan, ""], dtype="string") + s4 + s4.str.replace(".", "a", regex=True) If you want literal replacement of a string (equivalent to :meth:`str.replace`), you can set the optional ``regex`` parameter to ``False``, rather than escaping each diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index d71160cdbc369..478cf234b0908 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -311,6 +311,7 @@ Removal of prior version deprecations/changes - Changed behavior of :class:`Index` constructor when passed a ``SparseArray`` or ``SparseDtype`` to retain that dtype instead of casting to ``numpy.ndarray`` (:issue:`43930`) - Changed behavior of :class:`Index`, :class:`Series`, :class:`DataFrame` constructors with floating-dtype data and a :class:`DatetimeTZDtype`, the data are now interpreted as UTC-times instead of wall-times, consistent with how integer-dtype data are treated (:issue:`45573`) - Removed the deprecated ``base`` and ``loffset`` arguments from :meth:`pandas.DataFrame.resample`, :meth:`pandas.Series.resample` and :class:`pandas.Grouper`. 
Use ``offset`` or ``origin`` instead (:issue:`31809`) +- Change the default argument of ``regex`` for :meth:`Series.str.replace` from ``True`` to ``False``. Additionally, a single character ``pat`` with ``regex=True`` is now treated as a regular expression instead of a string literal. (:issue:`36695`, :issue:`24804`) - Changed behavior of :meth:`DataFrame.any` and :meth:`DataFrame.all` with ``bool_only=True``; object-dtype columns with all-bool values will no longer be included, manually cast to ``bool`` dtype first (:issue:`46188`) - Changed behavior of comparison of a :class:`Timestamp` with a ``datetime.date`` object; these now compare as un-equal and raise on inequality comparisons, matching the ``datetime.datetime`` behavior (:issue:`36131`) - Enforced deprecation of silently dropping columns that raised a ``TypeError`` in :class:`Series.transform` and :class:`DataFrame.transform` when used with a list or dictionary (:issue:`43740`) diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index 0024cbcb01bfc..71a50c69bfee1 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -1323,7 +1323,7 @@ def replace( n: int = -1, case: bool | None = None, flags: int = 0, - regex: bool | None = None, + regex: bool = False, ): r""" Replace each occurrence of pattern/regex in the Series/Index. @@ -1351,7 +1351,7 @@ def replace( flags : int, default 0 (no flags) Regex module flags, e.g. re.IGNORECASE. Cannot be set if `pat` is a compiled regex. - regex : bool, default True + regex : bool, default False Determines if the passed-in pattern is a regular expression: - If True, assumes the passed-in pattern is a regular expression. @@ -1359,8 +1359,6 @@ def replace( - Cannot be set to False if `pat` is a compiled regex or `repl` is a callable. - .. 
versionadded:: 0.23.0 - Returns ------- Series or Index of object @@ -1444,20 +1442,6 @@ def replace( 2 NaN dtype: object """ - if regex is None: - if isinstance(pat, str) and any(c in pat for c in ".+*|^$?[](){}\\"): - # warn only in cases where regex behavior would differ from literal - msg = ( - "The default value of regex will change from True to False " - "in a future version." - ) - if len(pat) == 1: - msg += ( - " In addition, single character regular expressions will " - "*not* be treated as literal strings when regex=True." - ) - warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) - # Check whether repl is valid (GH 13438, GH 15055) if not (isinstance(repl, str) or callable(repl)): raise TypeError("repl must be a string or callable") @@ -1476,14 +1460,6 @@ def replace( elif callable(repl): raise ValueError("Cannot use a callable replacement when regex=False") - # The current behavior is to treat single character patterns as literal strings, - # even when ``regex`` is set to ``True``. 
- if isinstance(pat, str) and len(pat) == 1: - regex = False - - if regex is None: - regex = True - if case is None: case = True diff --git a/pandas/tests/strings/test_find_replace.py b/pandas/tests/strings/test_find_replace.py index 59b41e0ec944a..6f6acb7a996b2 100644 --- a/pandas/tests/strings/test_find_replace.py +++ b/pandas/tests/strings/test_find_replace.py @@ -423,7 +423,7 @@ def test_replace_callable_raises(any_string_dtype, repl): with tm.maybe_produces_warning( PerformanceWarning, any_string_dtype == "string[pyarrow]" ): - values.str.replace("a", repl) + values.str.replace("a", repl, regex=True) def test_replace_callable_named_groups(any_string_dtype): @@ -477,7 +477,7 @@ def test_replace_compiled_regex_unicode(any_string_dtype): with tm.maybe_produces_warning( PerformanceWarning, any_string_dtype == "string[pyarrow]" ): - result = ser.str.replace(pat, ", ") + result = ser.str.replace(pat, ", ", regex=True) tm.assert_series_equal(result, expected) @@ -490,13 +490,13 @@ def test_replace_compiled_regex_raises(any_string_dtype): msg = "case and flags cannot be set when pat is a compiled regex" with pytest.raises(ValueError, match=msg): - ser.str.replace(pat, "", flags=re.IGNORECASE) + ser.str.replace(pat, "", flags=re.IGNORECASE, regex=True) with pytest.raises(ValueError, match=msg): - ser.str.replace(pat, "", case=False) + ser.str.replace(pat, "", case=False, regex=True) with pytest.raises(ValueError, match=msg): - ser.str.replace(pat, "", case=True) + ser.str.replace(pat, "", case=True, regex=True) def test_replace_compiled_regex_callable(any_string_dtype): @@ -507,7 +507,7 @@ def test_replace_compiled_regex_callable(any_string_dtype): with tm.maybe_produces_warning( PerformanceWarning, any_string_dtype == "string[pyarrow]" ): - result = ser.str.replace(pat, repl, n=2) + result = ser.str.replace(pat, repl, n=2, regex=True) expected = Series(["foObaD__baRbaD", np.nan], dtype=any_string_dtype) tm.assert_series_equal(result, expected) @@ -617,48 +617,25 @@ 
def test_replace_not_case_sensitive_not_regex(any_string_dtype): tm.assert_series_equal(result, expected) -def test_replace_regex_default_warning(any_string_dtype): +def test_replace_regex(any_string_dtype): # https://github.com/pandas-dev/pandas/pull/24809 s = Series(["a", "b", "ac", np.nan, ""], dtype=any_string_dtype) - msg = ( - "The default value of regex will change from True to False in a " - "future version\\.$" - ) - - with tm.assert_produces_warning( - FutureWarning, - match=msg, - raise_on_extra_warnings=any_string_dtype != "string[pyarrow]", - ): - result = s.str.replace("^.$", "a") + result = s.str.replace("^.$", "a", regex=True) expected = Series(["a", "a", "ac", np.nan, ""], dtype=any_string_dtype) tm.assert_series_equal(result, expected) -@pytest.mark.parametrize("regex", [True, False, None]) +@pytest.mark.parametrize("regex", [True, False]) def test_replace_regex_single_character(regex, any_string_dtype): - # https://github.com/pandas-dev/pandas/pull/24809 - - # The current behavior is to treat single character patterns as literal strings, - # even when ``regex`` is set to ``True``. - + # https://github.com/pandas-dev/pandas/pull/24809, enforced in 2.0 + # GH 24804 s = Series(["a.b", ".", "b", np.nan, ""], dtype=any_string_dtype) - if regex is None: - msg = re.escape( - "The default value of regex will change from True to False in a future " - "version. In addition, single character regular expressions will *not* " - "be treated as literal strings when regex=True." 
- ) - with tm.assert_produces_warning( - FutureWarning, - match=msg, - ): - result = s.str.replace(".", "a", regex=regex) + result = s.str.replace(".", "a", regex=regex) + if regex: + expected = Series(["aaa", "a", "a", np.nan, ""], dtype=any_string_dtype) else: - result = s.str.replace(".", "a", regex=regex) - - expected = Series(["aab", "a", "b", np.nan, ""], dtype=any_string_dtype) + expected = Series(["aab", "a", "b", np.nan, ""], dtype=any_string_dtype) tm.assert_series_equal(result, expected)
- [x] closes #24804 (Replace xxxx with the GitHub issue number) Introduced in #36695
https://api.github.com/repos/pandas-dev/pandas/pulls/49486
2022-11-02T22:57:20Z
2022-11-03T16:27:13Z
2022-11-03T16:27:12Z
2022-11-03T20:13:56Z
DEPR: Index.reindex with non-unique Index
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index d71160cdbc369..ac3a1ca95fbb4 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -250,6 +250,7 @@ Removal of prior version deprecations/changes - Disallow passing non-keyword arguments to :meth:`DataFrame.where` and :meth:`Series.where` except for ``cond`` and ``other`` (:issue:`41523`) - Disallow passing non-keyword arguments to :meth:`Series.set_axis` and :meth:`DataFrame.set_axis` except for ``labels`` (:issue:`41491`) - Disallow passing non-keyword arguments to :meth:`Series.rename_axis` and :meth:`DataFrame.rename_axis` except for ``mapper`` (:issue:`47587`) +- Disallow :meth:`Index.reindex` with non-unique :class:`Index` objects (:issue:`42568`) - Disallow passing non-keyword arguments to :meth:`Series.clip` and :meth:`DataFrame.clip` (:issue:`41511`) - Disallow passing non-keyword arguments to :meth:`Series.bfill`, :meth:`Series.ffill`, :meth:`DataFrame.bfill` and :meth:`DataFrame.ffill` (:issue:`41508`) - Disallow passing non-keyword arguments to :meth:`DataFrame.replace`, :meth:`Series.replace` except for ``to_replace`` and ``value`` (:issue:`47587`) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index b2ea46ce1399e..b5854c77d3709 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4339,12 +4339,7 @@ def reindex( if not self.is_unique: # GH#42568 - warnings.warn( - "reindexing with a non-unique Index is deprecated and " - "will raise in a future version.", - FutureWarning, - stacklevel=find_stack_level(), - ) + raise ValueError("cannot reindex on an axis with duplicate labels") target = self._wrap_reindex_result(target, indexer, preserve_names) return target, indexer diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 58b533cb576d9..e435dfbc6ab40 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -414,12 +414,7 @@ def reindex( 
indexer, missing = self.get_indexer_non_unique(target) if not self.is_unique: # GH#42568 - warnings.warn( - "reindexing with a non-unique Index is deprecated and will " - "raise in a future version.", - FutureWarning, - stacklevel=find_stack_level(), - ) + raise ValueError("cannot reindex on an axis with duplicate labels") new_target: Index if len(self) and indexer is not None: diff --git a/pandas/tests/indexes/categorical/test_reindex.py b/pandas/tests/indexes/categorical/test_reindex.py index 1337eff1f1c2f..8ca5c6099b4e7 100644 --- a/pandas/tests/indexes/categorical/test_reindex.py +++ b/pandas/tests/indexes/categorical/test_reindex.py @@ -1,4 +1,5 @@ import numpy as np +import pytest from pandas import ( Categorical, @@ -12,37 +13,28 @@ class TestReindex: def test_reindex_list_non_unique(self): # GH#11586 + msg = "cannot reindex on an axis with duplicate labels" ci = CategoricalIndex(["a", "b", "c", "a"]) - with tm.assert_produces_warning(FutureWarning, match="non-unique"): - res, indexer = ci.reindex(["a", "c"]) - - tm.assert_index_equal(res, Index(["a", "a", "c"]), exact=True) - tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp)) + with pytest.raises(ValueError, match=msg): + ci.reindex(["a", "c"]) def test_reindex_categorical_non_unique(self): + msg = "cannot reindex on an axis with duplicate labels" ci = CategoricalIndex(["a", "b", "c", "a"]) - with tm.assert_produces_warning(FutureWarning, match="non-unique"): - res, indexer = ci.reindex(Categorical(["a", "c"])) - - exp = CategoricalIndex(["a", "a", "c"], categories=["a", "c"]) - tm.assert_index_equal(res, exp, exact=True) - tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp)) + with pytest.raises(ValueError, match=msg): + ci.reindex(Categorical(["a", "c"])) def test_reindex_list_non_unique_unused_category(self): + msg = "cannot reindex on an axis with duplicate labels" ci = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"]) - with 
tm.assert_produces_warning(FutureWarning, match="non-unique"): - res, indexer = ci.reindex(["a", "c"]) - exp = Index(["a", "a", "c"], dtype="object") - tm.assert_index_equal(res, exp, exact=True) - tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp)) + with pytest.raises(ValueError, match=msg): + ci.reindex(["a", "c"]) def test_reindex_categorical_non_unique_unused_category(self): + msg = "cannot reindex on an axis with duplicate labels" ci = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"]) - with tm.assert_produces_warning(FutureWarning, match="non-unique"): - res, indexer = ci.reindex(Categorical(["a", "c"])) - exp = CategoricalIndex(["a", "a", "c"], categories=["a", "c"]) - tm.assert_index_equal(res, exp, exact=True) - tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp)) + with pytest.raises(ValueError, match=msg): + ci.reindex(Categorical(["a", "c"])) def test_reindex_duplicate_target(self): # See GH25459 diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index 769fd2d4a05eb..0c264c107d3d6 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -700,8 +700,7 @@ def test_asfreq_non_unique(): msg = "cannot reindex on an axis with duplicate labels" with pytest.raises(ValueError, match=msg): - with tm.assert_produces_warning(FutureWarning, match="non-unique"): - ts.asfreq("B") + ts.asfreq("B") def test_resample_axis1():
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/49485
2022-11-02T22:21:53Z
2022-11-03T08:32:48Z
2022-11-03T08:32:48Z
2022-11-03T16:15:53Z
DEPR: HDFStore.iteritems, read_csv(use_cols) behavior
diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index d71160cdbc369..a7eb59e409c9f 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -178,7 +178,7 @@ Removal of prior version deprecations/changes - Enforced deprecation changing behavior when passing ``datetime64[ns]`` dtype data and timezone-aware dtype to :class:`Series`, interpreting the values as wall-times instead of UTC times, matching :class:`DatetimeIndex` behavior (:issue:`41662`) - Removed deprecated :meth:`DataFrame._AXIS_NUMBERS`, :meth:`DataFrame._AXIS_NAMES`, :meth:`Series._AXIS_NUMBERS`, :meth:`Series._AXIS_NAMES` (:issue:`33637`) - Removed deprecated :meth:`Index.to_native_types`, use ``obj.astype(str)`` instead (:issue:`36418`) -- Removed deprecated :meth:`Series.iteritems`, :meth:`DataFrame.iteritems`, use ``obj.items`` instead (:issue:`45321`) +- Removed deprecated :meth:`Series.iteritems`, :meth:`DataFrame.iteritems` and :meth:`HDFStore.iteritems` use ``obj.items`` instead (:issue:`45321`) - Removed deprecated :meth:`DatetimeIndex.union_many` (:issue:`45018`) - Removed deprecated ``weekofyear`` and ``week`` attributes of :class:`DatetimeArray`, :class:`DatetimeIndex` and ``dt`` accessor in favor of ``isocalendar().week`` (:issue:`33595`) - Removed deprecated :meth:`RangeIndex._start`, :meth:`RangeIndex._stop`, :meth:`RangeIndex._step`, use ``start``, ``stop``, ``step`` instead (:issue:`30482`) @@ -281,6 +281,7 @@ Removal of prior version deprecations/changes - Removed ``pandas.SparseSeries`` and ``pandas.SparseDataFrame``, including pickle support. 
(:issue:`30642`) - Enforced disallowing passing an integer ``fill_value`` to :meth:`DataFrame.shift` and :meth:`Series.shift`` with datetime64, timedelta64, or period dtypes (:issue:`32591`) - Enforced disallowing a string column label into ``times`` in :meth:`DataFrame.ewm` (:issue:`43265`) +- Enforced disallowing using ``usecols`` with out of bounds indices for ``read_csv`` with ``engine="c"`` (:issue:`25623`) - Enforced disallowing the use of ``**kwargs`` in :class:`.ExcelWriter`; use the keyword argument ``engine_kwargs`` instead (:issue:`40430`) - Enforced disallowing a tuple of column labels into :meth:`.DataFrameGroupBy.__getitem__` (:issue:`30546`) - Enforced disallowing setting values with ``.loc`` using a positional slice. Use ``.loc`` with labels or ``.iloc`` with positions instead (:issue:`31840`) diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 4af2caf0cdb93..a5b07d46bfeef 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -13,6 +13,7 @@ import sys import time import warnings +from pandas.errors import ParserError from pandas.util._exceptions import find_stack_level from pandas import StringDtype @@ -971,11 +972,9 @@ cdef class TextReader: all(isinstance(u, int) for u in self.usecols)): missing_usecols = [col for col in self.usecols if col >= num_cols] if missing_usecols: - warnings.warn( - "Defining usecols with out of bounds indices is deprecated " - "and will raise a ParserError in a future version.", - FutureWarning, - stacklevel=find_stack_level(), + raise ParserError( + "Defining usecols without of bounds indices is not allowed. " + f"{missing_usecols} are out of bounds.", ) results = {} diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 68e734b122c6f..d28162ce4d0be 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -1528,8 +1528,6 @@ class ExcelFile: - Otherwise if `openpyxl <https://pypi.org/project/openpyxl/>`_ is installed, then ``openpyxl`` will be used. 
- Otherwise if ``xlrd >= 2.0`` is installed, a ``ValueError`` will be raised. - - Otherwise ``xlrd`` will be used and a ``FutureWarning`` will be raised. - This case will raise a ``ValueError`` in a future version of pandas. .. warning:: diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py index 4f15443ed5610..52fd88a9f5f6e 100644 --- a/pandas/io/parsers/python_parser.py +++ b/pandas/io/parsers/python_parser.py @@ -20,7 +20,6 @@ Sequence, cast, ) -import warnings import numpy as np @@ -34,7 +33,6 @@ EmptyDataError, ParserError, ) -from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_integer from pandas.core.dtypes.inference import is_dict_like @@ -595,11 +593,9 @@ def _handle_usecols( col for col in self.usecols if col >= num_original_columns ] if missing_usecols: - warnings.warn( - "Defining usecols with out of bounds indices is deprecated " - "and will raise a ParserError in a future version.", - FutureWarning, - stacklevel=find_stack_level(), + raise ParserError( + "Defining usecols without of bounds indices is not allowed. " + f"{missing_usecols} are out of bounds.", ) col_indices = self.usecols diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 1f2bb4c5d21b4..edfeaf92c7d4e 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -684,18 +684,6 @@ def items(self) -> Iterator[tuple[str, list]]: for g in self.groups(): yield g._v_pathname, g - def iteritems(self): - """ - iterate on key->group - """ - warnings.warn( - "iteritems is deprecated and will be removed in a future version. 
" - "Use .items instead.", - FutureWarning, - stacklevel=find_stack_level(), - ) - yield from self.items() - def open(self, mode: str = "a", **kwargs) -> None: """ Open the file in the specified mode diff --git a/pandas/tests/io/parser/usecols/test_usecols_basic.py b/pandas/tests/io/parser/usecols/test_usecols_basic.py index aef1937dcf287..bbf159845b1d6 100644 --- a/pandas/tests/io/parser/usecols/test_usecols_basic.py +++ b/pandas/tests/io/parser/usecols/test_usecols_basic.py @@ -7,6 +7,8 @@ import numpy as np import pytest +from pandas.errors import ParserError + from pandas import ( DataFrame, Index, @@ -402,20 +404,14 @@ def test_usecols_subset_names_mismatch_orig_columns(all_parsers, usecols): @pytest.mark.parametrize("names", [None, ["a", "b"]]) def test_usecols_indices_out_of_bounds(all_parsers, names): - # GH#25623 + # GH#25623 & GH 41130; enforced in 2.0 parser = all_parsers data = """ a,b 1,2 """ - with tm.assert_produces_warning( - FutureWarning, check_stacklevel=False, raise_on_extra_warnings=False - ): - result = parser.read_csv(StringIO(data), usecols=[0, 2], names=names, header=0) - expected = DataFrame({"a": [1], "b": [None]}) - if names is None and parser.engine == "python": - expected = DataFrame({"a": [1]}) - tm.assert_frame_equal(result, expected) + with pytest.raises(ParserError, match="Defining usecols without of bounds"): + parser.read_csv(StringIO(data), usecols=[0, 2], names=names, header=0) def test_usecols_additional_columns(all_parsers): diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py index 08b1ee3f0ddbe..3add6e1482687 100644 --- a/pandas/tests/io/pytables/test_store.py +++ b/pandas/tests/io/pytables/test_store.py @@ -1009,15 +1009,6 @@ def test_to_hdf_with_object_column_names(tmp_path, setup_path): assert len(result) -def test_hdfstore_iteritems_deprecated(tmp_path, setup_path): - path = tmp_path / setup_path - df = DataFrame({"a": [1]}) - with HDFStore(path, mode="w") as hdf: - 
hdf.put("table", df) - with tm.assert_produces_warning(FutureWarning): - next(hdf.iteritems()) - - def test_hdfstore_strides(setup_path): # GH22073 df = DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]})
Introduced in #41130, #45321
https://api.github.com/repos/pandas-dev/pandas/pulls/49483
2022-11-02T22:15:17Z
2022-11-03T16:25:32Z
2022-11-03T16:25:32Z
2022-11-03T16:25:36Z
BUG: pd.NA.__format__ fails with format_specs
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 92f7c0f6b59a3..2a112a9f141e1 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -908,6 +908,7 @@ Missing - Clarified documentation on interpolate with method =akima. The ``der`` parameter must be scalar or None (:issue:`33426`) - :meth:`DataFrame.interpolate` uses the correct axis convention now. Previously interpolating along columns lead to interpolation along indices and vice versa. Furthermore interpolating with methods ``pad``, ``ffill``, ``bfill`` and ``backfill`` are identical to using these methods with :meth:`fillna` (:issue:`12918`, :issue:`29146`) - Bug in :meth:`DataFrame.interpolate` when called on a DataFrame with column names of string type was throwing a ValueError. The method is no independing of the type of column names (:issue:`33956`) +- passing :class:`NA` will into a format string using format specs will now work. For example ``"{:.1f}".format(pd.NA)`` would previously raise a ``ValueError``, but will now return the string ``"<NA>"`` (:issue:`34740`) MultiIndex ^^^^^^^^^^ diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx index 6d4d1e95fe8c3..fdd06fe631b97 100644 --- a/pandas/_libs/missing.pyx +++ b/pandas/_libs/missing.pyx @@ -349,6 +349,12 @@ class NAType(C_NAType): def __repr__(self) -> str: return "<NA>" + def __format__(self, format_spec) -> str: + try: + return self.__repr__().__format__(format_spec) + except ValueError: + return self.__repr__() + def __bool__(self): raise TypeError("boolean value of NA is ambiguous") diff --git a/pandas/tests/scalar/test_na_scalar.py b/pandas/tests/scalar/test_na_scalar.py index a0e3f8984fbe4..dc5eb15348c1b 100644 --- a/pandas/tests/scalar/test_na_scalar.py +++ b/pandas/tests/scalar/test_na_scalar.py @@ -22,6 +22,17 @@ def test_repr(): assert str(NA) == "<NA>" +def test_format(): + # GH-34740 + assert format(NA) == "<NA>" + assert format(NA, ">10") == " <NA>" + assert 
format(NA, "xxx") == "<NA>" # NA is flexible, accept any format spec + + assert "{}".format(NA) == "<NA>" + assert "{:>10}".format(NA) == " <NA>" + assert "{:xxx}".format(NA) == "<NA>" + + def test_truthiness(): msg = "boolean value of NA is ambiguous"
``pd.NA`` fails if passed to a format string and format parameters are supplied. This is different behaviour than ``np.nan`` and makes converting arrays containing ``pd.NA`` to strings very brittle and annoying. Examples: ```python >>> format(pd.NA) '<NA>' # master and PR, ok >>> format(pd.NA, ".1f") TypeError # master '<NA>' # this PR >>> format(pd.NA, ">5") TypeError # master ' <NA>' # this PR, tries to behave like a string, then falls back to '<NA>', like np.nan ``` The new behaviour mirrors the behaviour of ``np.nan``.
https://api.github.com/repos/pandas-dev/pandas/pulls/34740
2020-06-13T08:22:28Z
2020-06-15T21:36:37Z
2020-06-15T21:36:37Z
2020-06-15T22:34:22Z
PERF: avoid copy in replace
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index e2a778f729470..13b98279169fd 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -906,8 +906,7 @@ def putmask( mask = _extract_bool_array(mask) assert not isinstance(new, (ABCIndexClass, ABCSeries, ABCDataFrame)) - new_values = self.values if inplace else self.values.copy() - + new_values = self.values # delay copy if possible. # if we are passed a scalar None, convert it here if not is_list_like(new) and isna(new) and not self.is_object: # FIXME: make sure we have compatible NA @@ -917,7 +916,7 @@ def putmask( # We only get here for non-Extension Blocks, so _try_coerce_args # is only relevant for DatetimeBlock and TimedeltaBlock if lib.is_scalar(new): - new = convert_scalar_for_putitemlike(new, new_values.dtype) + new = convert_scalar_for_putitemlike(new, self.values.dtype) if transpose: new_values = new_values.T @@ -929,6 +928,8 @@ def putmask( new = np.repeat(new, new_values.shape[-1]).reshape(self.shape) new = new.astype(new_values.dtype) + if new_values is self.values and not inplace: + new_values = new_values.copy() # we require exact matches between the len of the # values we are setting (or is compat). np.putmask # doesn't check this and will simply truncate / pad @@ -1000,6 +1001,8 @@ def f(mask, val, idx): return [self] if transpose: + if new_values is None: + new_values = self.values if inplace else self.values.copy() new_values = new_values.T return [self.make_block(new_values)]
Closes #34136. Hopefully this preserves the right behavior. I could imagine breaking something if a caller was relying on `putmask(., inplace=False)` returning a copy. ``` import pandas as pd import numpy as np df = pd.DataFrame({"A": 0, "B": 0}, index=range(4 * 10 ** 7)) # the 1 can be held in self._df.blocks[0], while the inf and -inf cant %timeit df.replace([np.inf, -np.inf, 1], np.nan, inplace=False) # 1.0.3 483 ms ± 10.5 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) # master 900 ms ± 18.8 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) # PR 490 ms ± 8.64 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/34737
2020-06-12T21:11:45Z
2020-06-14T14:34:36Z
2020-06-14T14:34:35Z
2020-06-14T14:34:41Z
BUG: Fixed regression in PeriodIndex loc
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 49cb78340d104..19ebe46fca302 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -508,7 +508,11 @@ def get_loc(self, key, method=None, tolerance=None): # _get_string_slice will handle cases where grp < freqn assert grp >= freqn - if grp == freqn: + # BusinessDay is a bit strange. It has a *lower* code, but we never parse + # a string as "BusinessDay" resolution, just Day. + if grp == freqn or ( + reso == Resolution.RESO_DAY and self.dtype.freq.name == "B" + ): key = Period(asdt, freq=self.freq) loc = self.get_loc(key, method=method, tolerance=tolerance) return loc diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index eaba0bb3793b2..12454c20d2bb4 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -693,6 +693,12 @@ def test_get_value(self): result2 = idx2.get_value(input2, p1) tm.assert_series_equal(result2, expected2) + def test_loc_str(self): + # https://github.com/pandas-dev/pandas/issues/33964 + index = pd.period_range(start="2000", periods=20, freq="B") + series = pd.Series(range(20), index=index) + assert series.loc["2000-01-14"] == 9 + @pytest.mark.parametrize("freq", ["H", "D"]) def test_get_value_datetime_hourly(self, freq): # get_loc and get_value should treat datetime objects symmetrically
Closes https://github.com/pandas-dev/pandas/issues/33964. This is the smallest change I could get. I'm not sure what a more comprehensive fix would look like. Regression only on master, so no need for a whatsnew.
https://api.github.com/repos/pandas-dev/pandas/pulls/34736
2020-06-12T20:04:21Z
2020-06-14T16:39:59Z
2020-06-14T16:39:59Z
2020-06-14T16:40:03Z
PERF: Fixed perf regression in TimedeltaIndex.get_loc
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 8af23815b54ef..1fea6ca1b8a3d 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -776,15 +776,19 @@ def _validate_shift_value(self, fill_value): return self._unbox(fill_value) - def _validate_scalar(self, value, msg: str, cast_str: bool = False): + def _validate_scalar( + self, value, msg: Optional[str] = None, cast_str: bool = False + ): """ Validate that the input value can be cast to our scalar_type. Parameters ---------- value : object - msg : str + msg : str, optional. Message to raise in TypeError on invalid input. + If not provided, `value` is cast to a str and used + as the message. cast_str : bool, default False Whether to try to parse string input to scalar_type. @@ -807,6 +811,8 @@ def _validate_scalar(self, value, msg: str, cast_str: bool = False): value = self._scalar_type(value) # type: ignore else: + if msg is None: + msg = str(value) raise TypeError(msg) return value diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index a14994866c0f7..f6661c6b50dfb 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -213,9 +213,8 @@ def get_loc(self, key, method=None, tolerance=None): if not is_scalar(key): raise InvalidIndexError(key) - msg = str(key) try: - key = self._data._validate_scalar(key, msg, cast_str=True) + key = self._data._validate_scalar(key, cast_str=True) except TypeError as err: raise KeyError(key) from err
Closes https://github.com/pandas-dev/pandas/issues/34510 1.0.3 ```python In [10]: index = pd.timedelta_range(start="1985", periods=1000, freq="D") ...: timedelta = index[500] ...: ...: %timeit index.get_loc(timedelta) 3.7 µs ± 275 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each) ``` This PR ``` 3.25 µs ± 67.7 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/34734
2020-06-12T16:59:19Z
2020-06-15T14:38:54Z
2020-06-15T14:38:54Z
2020-06-15T14:39:04Z
BUG: Fixed Series.replace for EA with casting
diff --git a/doc/source/whatsnew/v1.0.5.rst b/doc/source/whatsnew/v1.0.5.rst index 7dfac54279e6f..fdf08dd381050 100644 --- a/doc/source/whatsnew/v1.0.5.rst +++ b/doc/source/whatsnew/v1.0.5.rst @@ -24,6 +24,8 @@ Note this disables the ability to read Parquet files from directories on S3 again (:issue:`26388`, :issue:`34632`), which was added in the 1.0.4 release, but is now targeted for pandas 1.1.0. +- Fixed regression in :meth:`~DataFrame.replace` raising an ``AssertionError`` when replacing values in an extension dtype with values of a different dtype (:issue:`34530`) + .. _whatsnew_105.bug_fixes: Bug fixes diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 13b98279169fd..38c495e1dd0f3 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -745,7 +745,11 @@ def replace( if is_object_dtype(self): raise - assert not self._can_hold_element(value), value + if not self.is_extension: + # TODO: https://github.com/pandas-dev/pandas/issues/32586 + # Need an ExtensionArray._can_hold_element to indicate whether + # a scalar value can be placed in the array. + assert not self._can_hold_element(value), value # try again with a compatible block block = self.astype(object) diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py index 330c682216f53..8f57cf3191d5d 100644 --- a/pandas/tests/series/methods/test_replace.py +++ b/pandas/tests/series/methods/test_replace.py @@ -402,3 +402,8 @@ def test_replace_only_one_dictlike_arg(self): msg = "Series.replace cannot use dict-value and non-None to_replace" with pytest.raises(ValueError, match=msg): ser.replace(to_replace, value) + + def test_replace_extension_other(self): + # https://github.com/pandas-dev/pandas/issues/34530 + ser = pd.Series(pd.array([1, 2, 3], dtype="Int64")) + ser.replace("", "") # no exception
Closes https://github.com/pandas-dev/pandas/issues/34530
https://api.github.com/repos/pandas-dev/pandas/pulls/34733
2020-06-12T16:30:49Z
2020-06-15T19:21:21Z
2020-06-15T19:21:20Z
2020-06-16T09:46:26Z
Changed the way a few sentences were written
diff --git a/README.md b/README.md index 7edee8d3feeed..a72e8402e68a0 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ ## What is it? -**pandas** is a Python package providing fast, flexible, and expressive data +**pandas** is a Python package that provides fast, flexible, and expressive data structures designed to make working with "relational" or "labeled" data both easy and intuitive. It aims to be the fundamental high-level building block for doing practical, **real world** data analysis in Python. Additionally, it has @@ -154,11 +154,11 @@ For usage questions, the best place to go to is [StackOverflow](https://stackove Further, general questions and discussions can also take place on the [pydata mailing list](https://groups.google.com/forum/?fromgroups#!forum/pydata). ## Discussion and Development -Most development discussion is taking place on github in this repo. Further, the [pandas-dev mailing list](https://mail.python.org/mailman/listinfo/pandas-dev) can also be used for specialized discussions or design issues, and a [Gitter channel](https://gitter.im/pydata/pandas) is available for quick development related questions. +Most development discussions take place on github in this repo. Further, the [pandas-dev mailing list](https://mail.python.org/mailman/listinfo/pandas-dev) can also be used for specialized discussions or design issues, and a [Gitter channel](https://gitter.im/pydata/pandas) is available for quick development related questions. ## Contributing to pandas [![Open Source Helpers](https://www.codetriage.com/pandas-dev/pandas/badges/users.svg)](https://www.codetriage.com/pandas-dev/pandas) -All contributions, bug reports, bug fixes, documentation improvements, enhancements and ideas are welcome. +All contributions, bug reports, bug fixes, documentation improvements, enhancements, and ideas are welcome. 
A detailed overview on how to contribute can be found in the **[contributing guide](https://pandas.pydata.org/docs/dev/development/contributing.html)**. There is also an [overview](.github/CONTRIBUTING.md) on GitHub. diff --git a/setup.py b/setup.py index 9f411ec10cd80..3caea5c5e79da 100755 --- a/setup.py +++ b/setup.py @@ -117,7 +117,7 @@ def build_extensions(self): DESCRIPTION = "Powerful data structures for data analysis, time series, and statistics" LONG_DESCRIPTION = """ -**pandas** is a Python package providing fast, flexible, and expressive data +**pandas** is a Python package that provides fast, flexible, and expressive data structures designed to make working with structured (tabular, multidimensional, potentially heterogeneous) and time series data both easy and intuitive. It aims to be the fundamental high-level building block for doing practical,
Added comma before 'and' - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34729
2020-06-12T08:46:42Z
2020-06-12T20:57:24Z
2020-06-12T20:57:24Z
2020-06-12T21:05:20Z
Debug CI Issue
diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py index a8f49d91f040e..789a4668b6fee 100644 --- a/pandas/compat/numpy/__init__.py +++ b/pandas/compat/numpy/__init__.py @@ -11,6 +11,8 @@ _np_version_under1p16 = _nlv < LooseVersion("1.16") _np_version_under1p17 = _nlv < LooseVersion("1.17") _np_version_under1p18 = _nlv < LooseVersion("1.18") +_np_version_under1p19 = _nlv < LooseVersion("1.19") +_np_version_under1p20 = _nlv < LooseVersion("1.20") _is_numpy_dev = ".dev" in str(_nlv) diff --git a/pandas/tests/extension/base/dtype.py b/pandas/tests/extension/base/dtype.py index 65e32d716a4db..154fcdc38826d 100644 --- a/pandas/tests/extension/base/dtype.py +++ b/pandas/tests/extension/base/dtype.py @@ -68,18 +68,22 @@ def test_check_dtype(self, data): {"A": pd.Series(data, dtype=dtype), "B": data, "C": "foo", "D": 1} ) - # np.dtype('int64') == 'Int64' == 'int64' - # so can't distinguish - if dtype.name == "Int64": - expected = pd.Series([True, True, False, True], index=list("ABCD")) - else: - expected = pd.Series([True, True, False, False], index=list("ABCD")) - - # FIXME: This should probably be *fixed* not ignored. - # See libops.scalar_compare + # TODO(numpy-1.20): This warnings filter and if block can be removed + # once we require numpy>=1.20 with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) result = df.dtypes == str(dtype) + # NumPy>=1.20.0, but not pandas.compat.numpy till there + # is a wheel available with this change. 
+ try: + new_numpy_behavior = np.dtype("int64") != "Int64" + except TypeError: + new_numpy_behavior = True + + if dtype.name == "Int64" and not new_numpy_behavior: + expected = pd.Series([True, True, False, True], index=list("ABCD")) + else: + expected = pd.Series([True, True, False, False], index=list("ABCD")) self.assert_series_equal(result, expected) diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index 27039948dfc16..0b0d23632e827 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -96,13 +96,17 @@ def test_bootstrap_plot(self): class TestDataFramePlots(TestPlotBase): @td.skip_if_no_scipy def test_scatter_matrix_axis(self): + from pandas.plotting._matplotlib.compat import _mpl_ge_3_0_0 + scatter_matrix = plotting.scatter_matrix with tm.RNGContext(42): df = DataFrame(randn(100, 3)) # we are plotting multiples on a sub-plot - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning( + UserWarning, raise_on_extra_warnings=_mpl_ge_3_0_0() + ): axes = _check_plot_works( scatter_matrix, filterwarnings="always", frame=df, range_padding=0.1 )
https://api.github.com/repos/pandas-dev/pandas/pulls/34721
2020-06-11T22:17:59Z
2020-06-12T20:54:56Z
2020-06-12T20:54:56Z
2020-06-15T09:25:07Z
Removed __div__ impls from Cython
diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml index 25ee821afe7bd..31155ac93931a 100644 --- a/ci/deps/azure-37-locale.yaml +++ b/ci/deps/azure-37-locale.yaml @@ -5,8 +5,7 @@ dependencies: - python=3.7.* # tools - # Cython pin for https://github.com/pandas-dev/pandas/issues/34704 - - cython==0.29.19 + - cython>=0.29.16 - pytest>=5.0.1 - pytest-xdist>=1.21 - pytest-asyncio diff --git a/environment.yml b/environment.yml index bfe0e78c891cf..b81404094fa4c 100644 --- a/environment.yml +++ b/environment.yml @@ -12,7 +12,7 @@ dependencies: - asv # building - - cython=0.29.19 + - cython>=0.29.16 # code checks - black=19.10b0 diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index b5f5ef0a3f593..95881ebf1385c 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -424,11 +424,6 @@ cdef class Interval(IntervalMixin): return Interval(y.left * self, y.right * self, closed=y.closed) return NotImplemented - def __div__(self, y): - if isinstance(y, numbers.Number): - return Interval(self.left / y, self.right / y, closed=self.closed) - return NotImplemented - def __truediv__(self, y): if isinstance(y, numbers.Number): return Interval(self.left / y, self.right / y, closed=self.closed) diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index f079c5157eeb3..71f151e6eb876 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -221,9 +221,6 @@ cdef class _NaT(datetime): def __neg__(self): return NaT - def __div__(self, other): - return _nat_divide_op(self, other) - def __truediv__(self, other): return _nat_divide_op(self, other) diff --git a/requirements-dev.txt b/requirements-dev.txt index 791dc7cd79128..754ec7ae28748 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -5,7 +5,7 @@ numpy>=1.15 python-dateutil>=2.7.3 pytz asv -cython==0.29.19 +cython>=0.29.16 black==19.10b0 cpplint flake8<3.8.0
Alternate to #34711 which may close #34704
https://api.github.com/repos/pandas-dev/pandas/pulls/34718
2020-06-11T20:32:36Z
2020-06-14T15:57:50Z
2020-06-14T15:57:50Z
2020-06-15T16:20:31Z
DOC: updated pandas/core/series.py for SS06 errors
diff --git a/pandas/core/series.py b/pandas/core/series.py index b51c08fa592d5..b32a4c36a8247 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2935,8 +2935,10 @@ def combine_first(self, other) -> "Series": def update(self, other) -> None: """ - Modify Series in place using non-NA values from passed - Series. Aligns on index. + Modify Series in place using values from passed Series. + + Uses non-NA values from passed Series to make updates. Aligns + on index. Parameters ---------- @@ -3451,6 +3453,8 @@ def sort_index( def argsort(self, axis=0, kind="quicksort", order=None) -> "Series": """ + Return the integer indices that would sort the Series values. + Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. @@ -3733,8 +3737,7 @@ def reorder_levels(self, order) -> "Series": def explode(self) -> "Series": """ - Transform each element of a list-like to a row, replicating the - index values. + Transform each element of a list-like to a row. .. versionadded:: 0.25.0 @@ -3792,6 +3795,7 @@ def explode(self) -> "Series": def unstack(self, level=-1, fill_value=None): """ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. + The level involved will automatically get sorted. Parameters @@ -4825,8 +4829,7 @@ def to_timestamp(self, freq=None, how="start", copy=True) -> "Series": def to_period(self, freq=None, copy=True) -> "Series": """ - Convert Series from DatetimeIndex to PeriodIndex with desired - frequency (inferred from index if not passed). + Convert Series from DatetimeIndex to PeriodIndex. Parameters ----------
https://api.github.com/repos/pandas-dev/pandas/pulls/34716
2020-06-11T19:23:29Z
2020-06-12T20:58:28Z
2020-06-12T20:58:28Z
2020-06-12T20:58:35Z
DOC: updated core/groupby/generic.py for SS06 errors
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 3d07f90bf7f94..5894066dd33c8 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1573,8 +1573,10 @@ def _transform_item_by_item(self, obj: DataFrame, wrapper) -> DataFrame: def filter(self, func, dropna=True, *args, **kwargs): """ - Return a copy of a DataFrame excluding elements from groups that - do not satisfy the boolean criterion specified by func. + Return a copy of a DataFrame excluding filtered elements. + + Elements from groups are filtered if they do not satisfy the + boolean criterion specified by func. Parameters ---------- @@ -1835,8 +1837,7 @@ def count(self): def nunique(self, dropna: bool = True): """ - Return DataFrame with number of distinct observations per group for - each column. + Return DataFrame with counts of unique elements in each position. Parameters ----------
https://api.github.com/repos/pandas-dev/pandas/pulls/34715
2020-06-11T18:46:50Z
2020-06-12T20:59:56Z
2020-06-12T20:59:56Z
2020-06-12T20:59:56Z
DOC: updated core/indexes/base.py for SS06 errors
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 240882e561bc6..4a99d2dfe339a 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -609,9 +609,10 @@ def view(self, cls=None): def astype(self, dtype, copy=True): """ - Create an Index with values cast to dtypes. The class of a new Index - is determined by dtype. When conversion is impossible, a ValueError - exception is raised. + Create an Index with values cast to dtypes. + + The class of a new Index is determined by dtype. When conversion is + impossible, a ValueError exception is raised. Parameters ---------- @@ -2197,8 +2198,9 @@ def dropna(self, how="any"): def unique(self, level=None): """ - Return unique values in the index. Uniques are returned in order - of appearance, this does NOT sort. + Return unique values in the index. + + Unique values are returned in order of appearance, this does NOT sort. Parameters ---------- @@ -2675,8 +2677,7 @@ def intersection(self, other, sort=False): def difference(self, other, sort=None): """ - Return a new Index with elements from the index that are not in - `other`. + Return a new Index with elements of index not in `other`. This is the set difference of two Index objects. @@ -3271,8 +3272,7 @@ def _can_reindex(self, indexer): def reindex(self, target, method=None, level=None, limit=None, tolerance=None): """ - Create index with target's values (move/add/delete values - as necessary). + Create index with target's values. Parameters ---------- @@ -4253,8 +4253,7 @@ def equals(self, other: Any) -> bool: def identical(self, other) -> bool: """ - Similar to equals, but check that other comparable attributes are - also equal. + Similar to equals, but checks that object attributes and types are also equal. Returns ------- @@ -4340,8 +4339,7 @@ def asof(self, label): def asof_locs(self, where, mask): """ - Find the locations (indices) of the labels from the index for - every entry in the `where` argument. 
+ Return the locations (indices) of labels in the index. As in the `asof` function, if the label (a particular entry in `where`) is not in the index, the latest index label up to the @@ -4551,8 +4549,9 @@ def argsort(self, *args, **kwargs) -> np.ndarray: def get_value(self, series: "Series", key): """ - Fast lookup of value from 1-dimensional ndarray. Only use this if you - know what you're doing. + Fast lookup of value from 1-dimensional ndarray. + + Only use this if you know what you're doing. Returns ------- @@ -4905,8 +4904,9 @@ def _get_string_slice(self, key: str_t, use_lhs: bool = True, use_rhs: bool = Tr def slice_indexer(self, start=None, end=None, step=None, kind=None): """ - For an ordered or unique index, compute the slice indexer for input - labels and step. + Compute the slice indexer for input labels and step. + + Index needs to be ordered and unique. Parameters ----------
https://api.github.com/repos/pandas-dev/pandas/pulls/34713
2020-06-11T16:54:01Z
2020-06-12T21:00:56Z
2020-06-12T21:00:56Z
2020-06-12T21:51:07Z
BLD: Pin cython for 37-locale build
diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml index 31155ac93931a..25ee821afe7bd 100644 --- a/ci/deps/azure-37-locale.yaml +++ b/ci/deps/azure-37-locale.yaml @@ -5,7 +5,8 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.16 + # Cython pin for https://github.com/pandas-dev/pandas/issues/34704 + - cython==0.29.19 - pytest>=5.0.1 - pytest-xdist>=1.21 - pytest-asyncio diff --git a/environment.yml b/environment.yml index b81404094fa4c..bfe0e78c891cf 100644 --- a/environment.yml +++ b/environment.yml @@ -12,7 +12,7 @@ dependencies: - asv # building - - cython>=0.29.16 + - cython=0.29.19 # code checks - black=19.10b0 diff --git a/requirements-dev.txt b/requirements-dev.txt index 754ec7ae28748..791dc7cd79128 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -5,7 +5,7 @@ numpy>=1.15 python-dateutil>=2.7.3 pytz asv -cython>=0.29.16 +cython==0.29.19 black==19.10b0 cpplint flake8<3.8.0
Seeing if this resolves the build failure. xref https://github.com/pandas-dev/pandas/issues/34704
https://api.github.com/repos/pandas-dev/pandas/pulls/34711
2020-06-11T13:47:58Z
2020-06-11T15:26:58Z
2020-06-11T15:26:58Z
2020-06-15T16:17:38Z
BUG: DataFrame.unstack with non-consolidated
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 133fba0246497..391313fbb5283 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -41,8 +41,7 @@ class _Unstacker: Parameters ---------- - index : object - Pandas ``Index`` + index : MultiIndex level : int or str, default last level Level to "unstack". Accepts a name for the level. fill_value : scalar, optional @@ -83,7 +82,7 @@ class _Unstacker: """ def __init__( - self, index, level=-1, constructor=None, + self, index: MultiIndex, level=-1, constructor=None, ): if constructor is None: @@ -415,7 +414,7 @@ def unstack(obj, level, fill_value=None): level = obj.index._get_level_number(level) if isinstance(obj, DataFrame): - if isinstance(obj.index, MultiIndex) or not obj._can_fast_transpose: + if isinstance(obj.index, MultiIndex): return _unstack_frame(obj, level, fill_value=fill_value) else: return obj.T.stack(dropna=False) diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index 2e707342a0793..a6c4089dc71e6 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -140,6 +140,17 @@ def test_stack_mixed_level(self): expected = expected[["a", "b"]] tm.assert_frame_equal(result, expected) + def test_unstack_not_consolidated(self): + # Gh#34708 + df = pd.DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]}) + df2 = df[["x"]] + df2["y"] = df["y"] + assert len(df2._mgr.blocks) == 2 + + res = df2.unstack() + expected = df.unstack() + tm.assert_series_equal(res, expected) + def test_unstack_fill(self): # GH #9746: fill_value keyword argument for Series
- [x] closes #34708 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34709
2020-06-11T00:32:04Z
2020-06-14T14:13:01Z
2020-06-14T14:13:01Z
2020-06-14T15:10:18Z
PERF: is_date_array_normalized
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 152e9a5ad7ddc..40b2d44235d8b 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -445,7 +445,7 @@ cdef _TSObject convert_datetime_to_tsobject(datetime ts, object tz, cdef _TSObject create_tsobject_tz_using_offset(npy_datetimestruct dts, - int tzoffset, object tz=None): + int tzoffset, tzinfo tz=None): """ Convert a datetimestruct `dts`, along with initial timezone offset `tzoffset` to a _TSObject (with timezone object `tz` - optional). @@ -847,7 +847,7 @@ cdef inline int64_t _normalize_i8_stamp(int64_t local_val) nogil: @cython.wraparound(False) @cython.boundscheck(False) -def is_date_array_normalized(const int64_t[:] stamps, object tz=None): +def is_date_array_normalized(const int64_t[:] stamps, tzinfo tz=None): """ Check if all of the given (nanosecond) timestamps are normalized to midnight, i.e. hour == minute == second == 0. If the optional timezone @@ -867,20 +867,20 @@ def is_date_array_normalized(const int64_t[:] stamps, object tz=None): ndarray[int64_t] trans int64_t[:] deltas intp_t[:] pos - npy_datetimestruct dts int64_t local_val, delta str typ + int64_t day_nanos = 24 * 3600 * 1_000_000_000 if tz is None or is_utc(tz): for i in range(n): - dt64_to_dtstruct(stamps[i], &dts) - if (dts.hour + dts.min + dts.sec + dts.us) > 0: + local_val = stamps[i] + if local_val % day_nanos != 0: return False + elif is_tzlocal(tz): for i in range(n): local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) - dt64_to_dtstruct(local_val, &dts) - if (dts.hour + dts.min + dts.sec + dts.us) > 0: + if local_val % day_nanos != 0: return False else: trans, deltas, typ = get_dst_info(tz) @@ -890,16 +890,16 @@ def is_date_array_normalized(const int64_t[:] stamps, object tz=None): delta = deltas[0] for i in range(n): # Adjust datetime64 timestamp, recompute datetimestruct - dt64_to_dtstruct(stamps[i] + delta, &dts) - if (dts.hour + dts.min + dts.sec + 
dts.us) > 0: + local_val = stamps[i] + delta + if local_val % day_nanos != 0: return False else: pos = trans.searchsorted(stamps) - 1 for i in range(n): # Adjust datetime64 timestamp, recompute datetimestruct - dt64_to_dtstruct(stamps[i] + deltas[pos[i]], &dts) - if (dts.hour + dts.min + dts.sec + dts.us) > 0: + local_val = stamps[i] + deltas[pos[i]] + if local_val % day_nanos != 0: return False return True
Same optimization we made in normalize_i8_timestamps the other day.
https://api.github.com/repos/pandas-dev/pandas/pulls/34707
2020-06-10T21:57:37Z
2020-06-14T14:13:47Z
2020-06-14T14:13:47Z
2020-06-14T15:09:14Z
TST: Add test for rolling window, see GH 34605
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index f9b0e6856337b..8d72e2cb92ca9 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -663,3 +663,36 @@ def test_iter_rolling_datetime(expected, expected_index, window): for (expected, actual) in zip(expected, ser.rolling(window)): tm.assert_series_equal(actual, expected) + + +@pytest.mark.parametrize( + "grouping,_index", + [ + ( + {"level": 0}, + pd.MultiIndex.from_tuples( + [(0, 0), (0, 0), (1, 1), (1, 1), (1, 1)], names=[None, None] + ), + ), + ( + {"by": "X"}, + pd.MultiIndex.from_tuples( + [(0, 0), (1, 0), (2, 1), (3, 1), (4, 1)], names=["X", None] + ), + ), + ], +) +def test_rolling_positional_argument(grouping, _index, raw): + # GH 34605 + + def scaled_sum(*args): + if len(args) < 2: + raise ValueError("The function needs two arguments") + array, scale = args + return array.sum() / scale + + df = DataFrame(data={"X": range(5)}, index=[0, 0, 1, 1, 1]) + + expected = DataFrame(data={"X": [0.0, 0.5, 1.0, 1.5, 2.0]}, index=_index) + result = df.groupby(**grouping).rolling(1).apply(scaled_sum, raw=raw, args=(2,)) + tm.assert_frame_equal(result, expected)
- [X] closes #34605 - [X] tests added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Dear maintainers, I'm opening this PR to address issue #34605. ~I've made the test conditional to Python 3.8 as I'm using the positional only syntax as strong check. If this is not desired, let me know and I will change the function to explicitly check for the length of `*args`.~ Edit: maybe the usage of `parametrize` is a bit exaggerated here. Edit2: Using Python 3.8 only features does not pleases the tests. Updating.
https://api.github.com/repos/pandas-dev/pandas/pulls/34705
2020-06-10T19:07:27Z
2020-06-15T12:53:18Z
2020-06-15T12:53:18Z
2020-06-15T12:53:22Z
CLN: disallow tuple in to_offset
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 8db3d7affc5a5..5f8668f85c3b3 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -820,6 +820,7 @@ Datetimelike - Bug in :meth:`DatetimeIndex.intersection` and :meth:`TimedeltaIndex.intersection` with results not having the correct ``name`` attribute (:issue:`33904`) - Bug in :meth:`DatetimeArray.__setitem__`, :meth:`TimedeltaArray.__setitem__`, :meth:`PeriodArray.__setitem__` incorrectly allowing values with ``int64`` dtype to be silently cast (:issue:`33717`) - Bug in subtracting :class:`TimedeltaIndex` from :class:`Period` incorrectly raising ``TypeError`` in some cases where it should succeed and ``IncompatibleFrequency`` in some cases where it should raise ``TypeError`` (:issue:`33883`) +- The ``freq`` keyword in :class:`Period`, :func:`date_range`, :func:`period_range`, :func:`pd.tseries.frequencies.to_offset` no longer allows tuples, pass as string instead (:issue:`34703`) Timedelta ^^^^^^^^^ diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 093d53db21dc1..c6ab9188089e7 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -3482,36 +3482,6 @@ INVALID_FREQ_ERR_MSG = "Invalid frequency: {0}" _offset_map = {} -cdef _base_and_stride(str freqstr): - """ - Return base freq and stride info from string representation - - Returns - ------- - base : str - stride : int - - Examples - -------- - _base_and_stride('5Min') -> 'Min', 5 - """ - groups = opattern.match(freqstr) - - if not groups: - raise ValueError(f"Could not evaluate {freqstr}") - - stride = groups.group(1) - - if len(stride): - stride = int(stride) - else: - stride = 1 - - base = groups.group(2) - - return base, stride - - # TODO: better name? 
def _get_offset(name: str) -> BaseOffset: """ @@ -3574,10 +3544,10 @@ cpdef to_offset(freq): >>> to_offset("1D1H") <25 * Hours> - >>> to_offset(("W", 2)) + >>> to_offset("2W") <2 * Weeks: weekday=6> - >>> to_offset((2, "B")) + >>> to_offset("2B") <2 * BusinessDays> >>> to_offset(pd.Timedelta(days=1)) @@ -3593,12 +3563,9 @@ cpdef to_offset(freq): return freq if isinstance(freq, tuple): - name = freq[0] - stride = freq[1] - if isinstance(stride, str): - name, stride = stride, name - name, _ = _base_and_stride(name) - delta = _get_offset(name) * stride + raise TypeError( + f"to_offset does not support tuples {freq}, pass as a string instead" + ) elif isinstance(freq, timedelta): return delta_to_tick(freq) diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py index b15549839de03..c150e7901c86a 100644 --- a/pandas/tests/indexes/datetimes/test_constructors.py +++ b/pandas/tests/indexes/datetimes/test_constructors.py @@ -946,11 +946,6 @@ def test_datetimeindex_constructor_misc(self): assert idx[0] == sdate + 0 * offsets.BDay() assert idx.freq == "B" - idx = date_range(end=edate, freq=("D", 5), periods=20) - assert len(idx) == 20 - assert idx[-1] == edate - assert idx.freq == "5D" - idx1 = date_range(start=sdate, end=edate, freq="W-SUN") idx2 = date_range(start=sdate, end=edate, freq=offsets.Week(weekday=6)) assert len(idx1) == len(idx2) @@ -979,6 +974,12 @@ def test_pass_datetimeindex_to_index(self): tm.assert_numpy_array_equal(idx.values, expected.values) + def test_date_range_tuple_freq_raises(self): + # GH#34703 + edate = datetime(2000, 1, 1) + with pytest.raises(TypeError, match="pass as a string instead"): + date_range(end=edate, freq=("D", 5), periods=20) + def test_timestamp_constructor_invalid_fold_raise(): # Test for #25057 diff --git a/pandas/tests/indexes/period/test_constructors.py b/pandas/tests/indexes/period/test_constructors.py index 4ec7ef64e2272..f85f37e4127c3 100644 --- 
a/pandas/tests/indexes/period/test_constructors.py +++ b/pandas/tests/indexes/period/test_constructors.py @@ -463,12 +463,6 @@ def test_constructor(self): assert (i1 == i2).all() assert i1.freq == i2.freq - end_intv = Period("2006-12-31", ("w", 1)) - i2 = period_range(end=end_intv, periods=10) - assert len(i1) == len(i2) - assert (i1 == i2).all() - assert i1.freq == i2.freq - end_intv = Period("2005-05-01", "B") i1 = period_range(start=start, end=end_intv) @@ -490,6 +484,10 @@ def test_constructor(self): with pytest.raises(IncompatibleFrequency, match=msg): PeriodIndex(vals) + # tuple freq disallowed GH#34703 + with pytest.raises(TypeError, match="pass as a string instead"): + Period("2006-12-31", ("w", 1)) + @pytest.mark.parametrize( "freq", ["M", "Q", "A", "D", "B", "T", "S", "L", "U", "N", "H"] ) diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index d247d6571f5d0..47617802be11c 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -172,12 +172,6 @@ def test_period_index_length(self): assert (i1 == i2).all() assert i1.freq == i2.freq - end_intv = Period("2006-12-31", ("w", 1)) - i2 = period_range(end=end_intv, periods=10) - assert len(i1) == len(i2) - assert (i1 == i2).all() - assert i1.freq == i2.freq - msg = "start and end must have same freq" with pytest.raises(ValueError, match=msg): period_range(start=start, end=end_intv) diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index 702899f163e06..dcef0615121c1 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -48,8 +48,6 @@ def test_construction(self): i1 = Period("1982", freq="min") i2 = Period("1982", freq="MIN") assert i1 == i2 - i2 = Period("1982", freq=("Min", 1)) - assert i1 == i2 i1 = Period(year=2005, month=3, day=1, freq="D") i2 = Period("3/1/2005", freq="D") @@ -80,6 +78,10 @@ def 
test_construction(self): with pytest.raises(ValueError, match=msg): Period("2007-1-1", freq="X") + # GH#34703 tuple freq disallowed + with pytest.raises(TypeError, match="pass as a string instead"): + Period("1982", freq=("Min", 1)) + def test_construction_bday(self): # Biz day construction, roll forward if non-weekday diff --git a/pandas/tests/tslibs/test_to_offset.py b/pandas/tests/tslibs/test_to_offset.py index 04be0e445a3b2..93e5e2c801c09 100644 --- a/pandas/tests/tslibs/test_to_offset.py +++ b/pandas/tests/tslibs/test_to_offset.py @@ -10,7 +10,6 @@ [ (to_offset("10us"), offsets.Micro(10)), (offsets.Hour(), offsets.Hour()), - ((5, "T"), offsets.Minute(5)), ("2h30min", offsets.Minute(150)), ("2h 30min", offsets.Minute(150)), ("2h30min15s", offsets.Second(150 * 60 + 15)), @@ -89,10 +88,16 @@ def test_to_offset_invalid(freqstr): def test_to_offset_no_evaluate(): - with pytest.raises(ValueError, match="Could not evaluate"): + msg = str(("", "")) + with pytest.raises(TypeError, match=msg): to_offset(("", "")) +def test_to_offset_tuple_unsupported(): + with pytest.raises(TypeError, match="pass as a string instead"): + to_offset((5, "T")) + + @pytest.mark.parametrize( "freqstr,expected", [
This is technically an API change, but AFAICT this isnt documented behavior anywhere.
https://api.github.com/repos/pandas-dev/pandas/pulls/34703
2020-06-10T17:48:15Z
2020-06-15T00:57:17Z
2020-06-15T00:57:17Z
2020-06-15T01:14:28Z
CLN: remove libfrequencies.get_freq_group
diff --git a/pandas/_libs/tslibs/dtypes.pxd b/pandas/_libs/tslibs/dtypes.pxd index bce071d45c12f..f43bc283d98c7 100644 --- a/pandas/_libs/tslibs/dtypes.pxd +++ b/pandas/_libs/tslibs/dtypes.pxd @@ -1,3 +1,4 @@ +cdef dict attrname_to_abbrevs cdef enum c_FreqGroup: # Mirrors FreqGroup in the .pxy file diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx index e38cfe21a65cc..0752910317077 100644 --- a/pandas/_libs/tslibs/dtypes.pyx +++ b/pandas/_libs/tslibs/dtypes.pyx @@ -21,6 +21,11 @@ cdef class PeriodDtypeBase: return False return self.dtype_code == other.dtype_code + @property + def freq_group(self) -> int: + # See also: libperiod.get_freq_group + return (self.dtype_code // 1000) * 1000 + @property def date_offset(self): """ @@ -108,6 +113,22 @@ _period_code_map.update({ }) +# Map attribute-name resolutions to resolution abbreviations +_attrname_to_abbrevs = { + "year": "A", + "quarter": "Q", + "month": "M", + "day": "D", + "hour": "H", + "minute": "T", + "second": "S", + "millisecond": "L", + "microsecond": "U", + "nanosecond": "N", +} +cdef dict attrname_to_abbrevs = _attrname_to_abbrevs + + class FreqGroup: # Mirrors c_FreqGroup in the .pxd file FR_ANN = 1000 @@ -123,3 +144,8 @@ class FreqGroup: FR_US = 11000 FR_NS = 12000 FR_UND = -10000 # undefined + + @staticmethod + def get_freq_group(code: int) -> int: + # See also: PeriodDtypeBase.freq_group + return (code // 1000) * 1000 diff --git a/pandas/_libs/tslibs/frequencies.pxd b/pandas/_libs/tslibs/frequencies.pxd index 896eec77ef4fe..b3ad6e6c19ee3 100644 --- a/pandas/_libs/tslibs/frequencies.pxd +++ b/pandas/_libs/tslibs/frequencies.pxd @@ -1,3 +1 @@ -cdef dict attrname_to_abbrevs - cpdef int get_to_timestamp_base(int base) diff --git a/pandas/_libs/tslibs/frequencies.pyx b/pandas/_libs/tslibs/frequencies.pyx index 6e525500ec37a..fd28240abd882 100644 --- a/pandas/_libs/tslibs/frequencies.pyx +++ b/pandas/_libs/tslibs/frequencies.pyx @@ -1,43 +1,8 @@ from .dtypes import FreqGroup -# 
--------------------------------------------------------------------- -# Period codes - - -# Map attribute-name resolutions to resolution abbreviations -_attrname_to_abbrevs = { - "year": "A", - "quarter": "Q", - "month": "M", - "day": "D", - "hour": "H", - "minute": "T", - "second": "S", - "millisecond": "L", - "microsecond": "U", - "nanosecond": "N", -} -cdef dict attrname_to_abbrevs = _attrname_to_abbrevs - - # ---------------------------------------------------------------------- -# TODO: this is now identical to the version in libperiod -def get_freq_group(freq: int) -> int: - """ - Return frequency code group of given frequency str or offset. - - Examples - -------- - >>> get_freq_group(4001) - 4000 - - >>> get_freq_group(4006) - 4000 - """ - return (freq // 1000) * 1000 - cpdef int get_to_timestamp_base(int base): """ diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 47ebf139ed496..d14f9d82eb5be 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -71,12 +71,10 @@ from pandas._libs.tslibs.dtypes cimport ( FR_MS, FR_US, FR_NS, -) - -from pandas._libs.tslibs.frequencies cimport ( attrname_to_abbrevs, - get_to_timestamp_base, ) + +from pandas._libs.tslibs.frequencies cimport get_to_timestamp_base from pandas._libs.tslibs.parsing cimport get_rule_month from pandas._libs.tslibs.parsing import parse_time_string from pandas._libs.tslibs.nattype cimport ( diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index 7453933ddbb4f..55522e99459cb 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -5,9 +5,9 @@ from numpy cimport ndarray, int64_t, int32_t from pandas._libs.tslibs.util cimport get_nat +from pandas._libs.tslibs.dtypes cimport attrname_to_abbrevs from pandas._libs.tslibs.np_datetime cimport ( npy_datetimestruct, dt64_to_dtstruct) -from pandas._libs.tslibs.frequencies cimport attrname_to_abbrevs from 
pandas._libs.tslibs.frequencies import FreqGroup from pandas._libs.tslibs.timezones cimport ( is_utc, is_tzlocal, maybe_get_tz, get_dst_info) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index fc29f786a1476..2022a4a563678 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -6,7 +6,6 @@ from pandas._libs import index as libindex from pandas._libs.lib import no_default from pandas._libs.tslibs import Period, Resolution -from pandas._libs.tslibs.frequencies import get_freq_group from pandas._libs.tslibs.parsing import DateParseError, parse_time_string from pandas._typing import DtypeObj, Label from pandas.util._decorators import Appender, cache_readonly, doc @@ -510,7 +509,7 @@ def get_loc(self, key, method=None, tolerance=None): reso = Resolution.from_attrname(reso) grp = reso.freq_group - freqn = get_freq_group(self.dtype.dtype_code) + freqn = self.dtype.freq_group # _get_string_slice will handle cases where grp < freqn assert grp >= freqn @@ -586,7 +585,7 @@ def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime): def _validate_partial_date_slice(self, reso: Resolution): assert isinstance(reso, Resolution), (type(reso), reso) grp = reso.freq_group - freqn = get_freq_group(self.dtype.dtype_code) + freqn = self.dtype.freq_group if not grp < freqn: # TODO: we used to also check for diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py index 65f030223c7ca..05377e0c240b9 100644 --- a/pandas/plotting/_matplotlib/converter.py +++ b/pandas/plotting/_matplotlib/converter.py @@ -10,9 +10,9 @@ import matplotlib.units as units import numpy as np -from pandas._libs import lib, tslibs -from pandas._libs.tslibs import to_offset -from pandas._libs.tslibs.frequencies import FreqGroup, get_freq_group +from pandas._libs import lib +from pandas._libs.tslibs import Timestamp, to_offset +from pandas._libs.tslibs.dtypes import FreqGroup from pandas._libs.tslibs.offsets 
import BaseOffset from pandas.core.dtypes.common import ( @@ -45,7 +45,7 @@ def get_pairs(): pairs = [ - (tslibs.Timestamp, DatetimeConverter), + (Timestamp, DatetimeConverter), (Period, PeriodConverter), (pydt.datetime, DatetimeConverter), (pydt.date, DatetimeConverter), @@ -281,7 +281,7 @@ def try_parse(values): if isinstance(values, (datetime, pydt.date)): return _dt_to_float_ordinal(values) elif isinstance(values, np.datetime64): - return _dt_to_float_ordinal(tslibs.Timestamp(values)) + return _dt_to_float_ordinal(Timestamp(values)) elif isinstance(values, pydt.time): return dates.date2num(values) elif is_integer(values) or is_float(values): @@ -553,7 +553,7 @@ def _daily_finder(vmin, vmax, freq: BaseOffset): elif dtype_code == FreqGroup.FR_DAY: periodsperyear = 365 periodspermonth = 28 - elif get_freq_group(dtype_code) == FreqGroup.FR_WK: + elif FreqGroup.get_freq_group(dtype_code) == FreqGroup.FR_WK: periodsperyear = 52 periodspermonth = 3 else: # pragma: no cover diff --git a/pandas/tests/tseries/frequencies/test_freq_code.py b/pandas/tests/tseries/frequencies/test_freq_code.py index 189a0cc2171ad..5383c1ff1c2c9 100644 --- a/pandas/tests/tseries/frequencies/test_freq_code.py +++ b/pandas/tests/tseries/frequencies/test_freq_code.py @@ -1,7 +1,8 @@ import pytest from pandas._libs.tslibs import Resolution, to_offset -from pandas._libs.tslibs.frequencies import _attrname_to_abbrevs, get_to_timestamp_base +from pandas._libs.tslibs.dtypes import _attrname_to_abbrevs +from pandas._libs.tslibs.frequencies import get_to_timestamp_base @pytest.mark.parametrize(
The next (hopefully last) step after this is to move Resolution to dtypes.pyx and de-duplicate it with FreqGroup.
https://api.github.com/repos/pandas-dev/pandas/pulls/34701
2020-06-10T16:47:53Z
2020-06-14T14:20:26Z
2020-06-14T14:20:26Z
2020-06-14T15:11:04Z
CLN: remove usages of base_and_stride
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 4069d192d9e88..250ff608308d8 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -3491,7 +3491,7 @@ INVALID_FREQ_ERR_MSG = "Invalid frequency: {0}" _offset_map = {} -cpdef base_and_stride(str freqstr): +cdef _base_and_stride(str freqstr): """ Return base freq and stride info from string representation @@ -3502,7 +3502,7 @@ cpdef base_and_stride(str freqstr): Examples -------- - _freq_and_stride('5Min') -> 'Min', 5 + _base_and_stride('5Min') -> 'Min', 5 """ groups = opattern.match(freqstr) @@ -3606,7 +3606,7 @@ cpdef to_offset(freq): stride = freq[1] if isinstance(stride, str): name, stride = stride, name - name, _ = base_and_stride(name) + name, _ = _base_and_stride(name) delta = _get_offset(name) * stride elif isinstance(freq, timedelta): diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 90513e355e732..b6c27abc321e1 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -12,7 +12,6 @@ conversion, fields, iNaT, - offsets as liboffsets, resolution as libresolution, timezones, to_offset, @@ -1106,8 +1105,7 @@ def to_period(self, freq=None): # https://github.com/pandas-dev/pandas/issues/33358 if res is None: - base, stride = liboffsets.base_and_stride(freq) - res = f"{stride}{base}" + res = freq freq = res diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py index 99fc730e818c4..fa8051954e435 100644 --- a/pandas/plotting/_matplotlib/timeseries.py +++ b/pandas/plotting/_matplotlib/timeseries.py @@ -7,7 +7,6 @@ from pandas._libs.tslibs import Period, to_offset from pandas._libs.tslibs.frequencies import FreqGroup -from pandas._libs.tslibs.offsets import base_and_stride from pandas._typing import FrameOrSeriesUnion from pandas.core.dtypes.generic import ( @@ -167,12 +166,9 @@ def _get_ax_freq(ax): def _get_period_alias(freq) -> Optional[str]: - 
if isinstance(freq, DateOffset): - freq = freq.rule_code - else: - freq = base_and_stride(freq)[0] + freqstr = to_offset(freq).rule_code - freq = get_period_alias(freq) + freq = get_period_alias(freqstr) return freq
https://api.github.com/repos/pandas-dev/pandas/pulls/34700
2020-06-10T16:19:53Z
2020-06-14T14:15:12Z
2020-06-14T14:15:12Z
2020-06-14T15:12:19Z
TYP: type annotations for read_sas
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py index bd8c3be271505..291c9d1ee7f0c 100644 --- a/pandas/io/sas/sasreader.py +++ b/pandas/io/sas/sasreader.py @@ -1,11 +1,16 @@ """ Read SAS sas7bdat or xport files. """ - from abc import ABCMeta, abstractmethod +from typing import TYPE_CHECKING, Optional, Union, overload + +from pandas._typing import FilePathOrBuffer, Label from pandas.io.common import stringify_path +if TYPE_CHECKING: + from pandas import DataFrame # noqa: F401 + # TODO(PY38): replace with Protocol in Python 3.8 class ReaderBase(metaclass=ABCMeta): @@ -22,14 +27,38 @@ def close(self): pass +@overload +def read_sas( + filepath_or_buffer: FilePathOrBuffer, + format: Optional[str] = ..., + index: Optional[Label] = ..., + encoding: Optional[str] = ..., + chunksize: int = ..., + iterator: bool = ..., +) -> ReaderBase: + ... + + +@overload +def read_sas( + filepath_or_buffer: FilePathOrBuffer, + format: Optional[str] = ..., + index: Optional[Label] = ..., + encoding: Optional[str] = ..., + chunksize: None = ..., + iterator: bool = ..., +) -> Union["DataFrame", ReaderBase]: + ... + + def read_sas( - filepath_or_buffer, - format=None, - index=None, - encoding=None, - chunksize=None, - iterator=False, -): + filepath_or_buffer: FilePathOrBuffer, + format: Optional[str] = None, + index: Optional[Label] = None, + encoding: Optional[str] = None, + chunksize: Optional[int] = None, + iterator: bool = False, +) -> Union["DataFrame", ReaderBase]: """ Read SAS files stored as either XPORT or SAS7BDAT format files.
https://api.github.com/repos/pandas-dev/pandas/pulls/34697
2020-06-10T15:00:17Z
2020-06-13T18:01:47Z
2020-06-13T18:01:47Z
2020-06-13T18:01:59Z
TYP: check_untyped_defs pandas.io.json._table_schema
diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py index 239ff6241aab0..84146a5d732e1 100644 --- a/pandas/io/json/_table_schema.py +++ b/pandas/io/json/_table_schema.py @@ -3,10 +3,11 @@ https://specs.frictionlessdata.io/json-table-schema/ """ +from typing import TYPE_CHECKING, Any, Dict, Optional, cast import warnings import pandas._libs.json as json -from pandas._typing import DtypeObj +from pandas._typing import DtypeObj, FrameOrSeries, JSONSerializable from pandas.core.dtypes.common import ( is_bool_dtype, @@ -24,6 +25,9 @@ from pandas import DataFrame import pandas.core.common as com +if TYPE_CHECKING: + from pandas.core.indexes.multi import MultiIndex # noqa: F401 + loads = json.loads @@ -103,7 +107,10 @@ def convert_pandas_type_to_json_field(arr): name = "values" else: name = arr.name - field = {"name": name, "type": as_json_table_type(dtype)} + field: Dict[str, JSONSerializable] = { + "name": name, + "type": as_json_table_type(dtype), + } if is_categorical_dtype(dtype): cats = dtype.categories @@ -182,7 +189,12 @@ def convert_json_field_to_pandas_type(field): raise ValueError(f"Unsupported or invalid field type: {typ}") -def build_table_schema(data, index=True, primary_key=None, version=True): +def build_table_schema( + data: FrameOrSeries, + index: bool = True, + primary_key: Optional[bool] = None, + version: bool = True, +) -> Dict[str, JSONSerializable]: """ Create a Table schema from ``data``. 
@@ -233,11 +245,12 @@ def build_table_schema(data, index=True, primary_key=None, version=True): if index is True: data = set_default_names(data) - schema = {} + schema: Dict[str, Any] = {} fields = [] if index: if data.index.nlevels > 1: + data.index = cast("MultiIndex", data.index) for level, name in zip(data.index.levels, data.index.names): new_field = convert_pandas_type_to_json_field(level) new_field["name"] = name diff --git a/setup.cfg b/setup.cfg index ea3d4c67d9358..31388b252dc8b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -274,9 +274,6 @@ check_untyped_defs=False [mypy-pandas.io.json._json] check_untyped_defs=False -[mypy-pandas.io.json._table_schema] -check_untyped_defs=False - [mypy-pandas.io.parsers] check_untyped_defs=False
pandas\io\json\_table_schema.py:112: error: Incompatible types in assignment (expression has type "Dict[str, List[Any]]", target has type "str") pandas\io\json\_table_schema.py:264: error: Incompatible types in assignment (expression has type "str", target has type "List[Any]")
https://api.github.com/repos/pandas-dev/pandas/pulls/34695
2020-06-10T14:39:23Z
2020-06-14T09:51:40Z
2020-06-14T09:51:40Z
2020-06-14T09:51:51Z
TYP: check_untyped_defs pandas.core.resample
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 49cb78340d104..fc29f786a1476 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -64,8 +64,7 @@ def _new_PeriodIndex(cls, **d): @inherit_names( - ["strftime", "to_timestamp", "asfreq", "start_time", "end_time"] - + PeriodArray._field_ops, + ["strftime", "to_timestamp", "start_time", "end_time"] + PeriodArray._field_ops, PeriodArray, wrap=True, ) @@ -152,6 +151,14 @@ class PeriodIndex(DatetimeIndexOpsMixin, Int64Index): _engine_type = libindex.PeriodEngine _supports_partial_string_indexing = True + # -------------------------------------------------------------------- + # methods that dispatch to array and wrap result in PeriodIndex + + @doc(PeriodArray.asfreq) + def asfreq(self, freq=None, how: str = "E") -> "PeriodIndex": + arr = self._data.asfreq(freq, how) + return type(self)._simple_new(arr, name=self.name) + # ------------------------------------------------------------------------ # Index Constructors diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 32e947dc414d2..5e363f2814d39 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -966,7 +966,8 @@ def __init__(self, obj, *args, **kwargs): for attr in self._attributes: setattr(self, attr, kwargs.get(attr, getattr(parent, attr))) - super().__init__(None) + # error: Too many arguments for "__init__" of "object" + super().__init__(None) # type: ignore self._groupby = groupby self._groupby.mutated = True self._groupby.grouper.mutated = True @@ -1553,7 +1554,7 @@ def _get_time_delta_bins(self, ax): return binner, bins, labels - def _get_time_period_bins(self, ax): + def _get_time_period_bins(self, ax: DatetimeIndex): if not isinstance(ax, DatetimeIndex): raise TypeError( "axis must be a DatetimeIndex, but got " @@ -1569,13 +1570,13 @@ def _get_time_period_bins(self, ax): labels = binner = period_range(start=ax[0], end=ax[-1], freq=freq, name=ax.name) end_stamps = (labels 
+ freq).asfreq(freq, "s").to_timestamp() - if ax.tzinfo: - end_stamps = end_stamps.tz_localize(ax.tzinfo) + if ax.tz: + end_stamps = end_stamps.tz_localize(ax.tz) bins = ax.searchsorted(end_stamps, side="left") return binner, bins, labels - def _get_period_bins(self, ax): + def _get_period_bins(self, ax: PeriodIndex): if not isinstance(ax, PeriodIndex): raise TypeError( "axis must be a PeriodIndex, but got " @@ -1898,6 +1899,7 @@ def _asfreq_compat(index, freq): raise ValueError( "Can only set arbitrary freq for empty DatetimeIndex or TimedeltaIndex" ) + new_index: Index if isinstance(index, PeriodIndex): new_index = index.asfreq(freq=freq) else: diff --git a/setup.cfg b/setup.cfg index ea3d4c67d9358..203a776cd6a0d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -217,9 +217,6 @@ check_untyped_defs=False [mypy-pandas.core.ops.docstrings] check_untyped_defs=False -[mypy-pandas.core.resample] -check_untyped_defs=False - [mypy-pandas.core.reshape.merge] check_untyped_defs=False
pandas\core\resample.py:969: error: Too many arguments for "__init__" of "object" pandas\core\resample.py:1572: error: "DatetimeIndex" has no attribute "tzinfo" pandas\core\resample.py:1573: error: "DatetimeIndex" has no attribute "tzinfo" pandas\core\resample.py:1585: error: "PeriodIndex" has no attribute "asfreq"; maybe "freq"? pandas\core\resample.py:1867: error: "PeriodIndex" has no attribute "asfreq"; maybe "freq"? pandas\core\resample.py:1902: error: "PeriodIndex" has no attribute "asfreq"; maybe "freq"?
https://api.github.com/repos/pandas-dev/pandas/pulls/34692
2020-06-10T14:07:09Z
2020-06-13T18:25:54Z
2020-06-13T18:25:54Z
2020-06-13T18:49:41Z
DOC: Add note about shallow clones in contributing guide
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index 163d345b4f829..b85e9403038ab 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -136,6 +136,10 @@ want to clone your fork to your machine:: This creates the directory `pandas-yourname` and connects your repository to the upstream (main project) *pandas* repository. +Note that performing a shallow clone (with ``--depth==N``, for some ``N`` greater +or equal to 1) might break some tests and features as ``pd.show_versions()`` +as the version number cannot be computed anymore. + .. _contributing.dev_env: Creating a development environment
- [ ] closes #xxxx - [X] tests added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry I didn't open a new issue for this, let me know if I should, I'll gladly do it (I got the impression that that's not mandatory by reading the contributing guidelines). Feel also free to close without merging if I'm the only person who got confused by this.
https://api.github.com/repos/pandas-dev/pandas/pulls/34690
2020-06-10T13:48:43Z
2020-06-10T14:54:15Z
2020-06-10T14:54:14Z
2020-06-10T14:54:15Z
TYP: check_untyped_defs pandas.core.nanops
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 6b8518d8a47a0..e7e5e37bb7817 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -86,7 +86,7 @@ def __init__(self, name=None, **kwargs): self.name = name self.kwargs = kwargs - def __call__(self, alt): + def __call__(self, alt: F) -> F: bn_name = self.name or alt.__name__ try: @@ -130,7 +130,7 @@ def f( return result - return f + return cast(F, f) def _bn_ok_dtype(dtype: DtypeObj, name: str) -> bool: @@ -514,7 +514,12 @@ def nansum( @disallow(PeriodDtype) @bottleneck_switch() -def nanmean(values, axis=None, skipna=True, mask=None): +def nanmean( + values: np.ndarray, + axis: Optional[int] = None, + skipna: bool = True, + mask: Optional[np.ndarray] = None, +) -> float: """ Compute the mean of the element along an axis ignoring NaNs @@ -528,7 +533,7 @@ def nanmean(values, axis=None, skipna=True, mask=None): Returns ------- - result : float + float Unless input is a float array, in which case use the same precision as the input array. 
@@ -558,6 +563,7 @@ def nanmean(values, axis=None, skipna=True, mask=None): the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum)) if axis is not None and getattr(the_sum, "ndim", False): + count = cast(np.ndarray, count) with np.errstate(all="ignore"): # suppress division by zero warnings the_mean = the_sum / count @@ -1205,17 +1211,17 @@ def _maybe_arg_null_out( def _get_counts( - values_shape: Tuple[int], + values_shape: Tuple[int, ...], mask: Optional[np.ndarray], axis: Optional[int], dtype: Dtype = float, -) -> Union[int, np.ndarray]: +) -> Union[int, float, np.ndarray]: """ Get the count of non-null values along an axis Parameters ---------- - values_shape : Tuple[int] + values_shape : tuple of int shape tuple from values ndarray, used if mask is None mask : Optional[ndarray[bool]] locations in values that should be considered missing diff --git a/setup.cfg b/setup.cfg index ea3d4c67d9358..98ceb65a65f0f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -211,9 +211,6 @@ check_untyped_defs=False [mypy-pandas.core.missing] check_untyped_defs=False -[mypy-pandas.core.nanops] -check_untyped_defs=False - [mypy-pandas.core.ops.docstrings] check_untyped_defs=False
pandas\core\nanops.py:565: error: Item "bool" of "Union[bool, Any]" has no attribute "any"
https://api.github.com/repos/pandas-dev/pandas/pulls/34689
2020-06-10T13:00:05Z
2020-06-14T14:56:37Z
2020-06-14T14:56:37Z
2020-06-14T15:51:36Z
TYP: update setup.cfg
diff --git a/setup.cfg b/setup.cfg index ea3d4c67d9358..e31d6001e065d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -136,9 +136,6 @@ check_untyped_defs=False [mypy-pandas.conftest] ignore_errors=True -[mypy-pandas.tests.arithmetic.test_datetime64] -ignore_errors=True - [mypy-pandas.tests.tools.test_to_datetime] ignore_errors=True @@ -303,6 +300,3 @@ check_untyped_defs=False [mypy-pandas.tseries.holiday] check_untyped_defs=False - -[mypy-pandas.tseries.offsets] -check_untyped_defs=False
https://api.github.com/repos/pandas-dev/pandas/pulls/34688
2020-06-10T12:16:47Z
2020-06-13T17:57:28Z
2020-06-13T17:57:28Z
2020-06-13T17:57:49Z
CLN: dont consolidate in reshape.concat
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 2630c07814bb2..59301391a7dad 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -6,7 +6,16 @@ import numpy as np -from pandas._libs import NaT, algos as libalgos, internals as libinternals, lib, writers +from pandas._libs import ( + Interval, + NaT, + Period, + Timestamp, + algos as libalgos, + internals as libinternals, + lib, + writers, +) from pandas._libs.internals import BlockPlacement from pandas._libs.tslibs import conversion from pandas._libs.tslibs.timezones import tz_compare @@ -41,17 +50,15 @@ is_float_dtype, is_integer, is_integer_dtype, - is_interval_dtype, is_list_like, is_object_dtype, - is_period_dtype, is_re, is_re_compilable, is_sparse, is_timedelta64_dtype, pandas_dtype, ) -from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.dtypes import CategoricalDtype, ExtensionDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCIndex, ABCPandasArray, ABCSeries from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, isna_compat @@ -2629,36 +2636,38 @@ def get_block_type(values, dtype=None): ------- cls : class, subclass of Block """ + # We use vtype and kind checks because they are much more performant + # than is_foo_dtype dtype = dtype or values.dtype vtype = dtype.type + kind = dtype.kind cls: Type[Block] if is_sparse(dtype): # Need this first(ish) so that Sparse[datetime] is sparse cls = ExtensionBlock - elif is_categorical_dtype(values.dtype): + elif isinstance(dtype, CategoricalDtype): cls = CategoricalBlock - elif issubclass(vtype, np.datetime64): - assert not is_datetime64tz_dtype(values.dtype) - cls = DatetimeBlock - elif is_datetime64tz_dtype(values.dtype): + elif vtype is Timestamp: cls = DatetimeTZBlock - elif is_interval_dtype(dtype) or is_period_dtype(dtype): + elif vtype is Interval or vtype is Period: cls = ObjectValuesExtensionBlock - elif 
is_extension_array_dtype(values.dtype): + elif isinstance(dtype, ExtensionDtype): # Note: need to be sure PandasArray is unwrapped before we get here cls = ExtensionBlock - elif issubclass(vtype, np.floating): - cls = FloatBlock - elif issubclass(vtype, np.timedelta64): - assert issubclass(vtype, np.integer) + + elif kind == "M": + cls = DatetimeBlock + elif kind == "m": cls = TimeDeltaBlock - elif issubclass(vtype, np.complexfloating): + elif kind == "f": + cls = FloatBlock + elif kind == "c": cls = ComplexBlock - elif issubclass(vtype, np.integer): + elif kind == "i" or kind == "u": cls = IntBlock - elif dtype == np.bool_: + elif kind == "b": cls = BoolBlock else: cls = ObjectBlock diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 06de1972b4c9a..dd3a04ccb38e2 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -70,14 +70,21 @@ def concatenate_block_managers( vals = [ju.block.values for ju in join_units] if not blk.is_extension: - values = concat_compat(vals, axis=blk.ndim - 1) + # _is_uniform_join_units ensures a single dtype, so + # we can use np.concatenate, which is more performant + # than concat_compat + values = np.concatenate(vals, axis=blk.ndim - 1) else: # TODO(EA2D): special-casing not needed with 2D EAs values = concat_compat(vals) if not isinstance(values, ExtensionArray): values = values.reshape(1, len(values)) - b = make_block(values, placement=placement, ndim=blk.ndim) + if blk.values.dtype == values.dtype: + # Fast-path + b = blk.make_block_same_class(values, placement=placement) + else: + b = make_block(values, placement=placement, ndim=blk.ndim) else: b = make_block( _concatenate_join_units(join_units, concat_axis, copy=copy), diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 4a2629daf63d7..42b541bd4cb02 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -18,6 +18,7 @@ import numpy as np from pandas._typing import 
FrameOrSeriesUnion, Label +from pandas.util._decorators import cache_readonly from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries @@ -360,7 +361,7 @@ def __init__( if len(objs) == 0: raise ValueError("All objects passed were None") - # consolidate data & figure out what our result ndim is going to be + # figure out what our result ndim is going to be ndims = set() for obj in objs: if not isinstance(obj, (ABCSeries, ABCDataFrame)): @@ -370,8 +371,6 @@ def __init__( ) raise TypeError(msg) - # consolidate - obj._consolidate_inplace() ndims.add(obj.ndim) # get the sample @@ -543,7 +542,7 @@ def _get_result_dim(self) -> int: def _get_new_axes(self) -> List[Index]: ndim = self._get_result_dim() return [ - self._get_concat_axis() if i == self.bm_axis else self._get_comb_axis(i) + self._get_concat_axis if i == self.bm_axis else self._get_comb_axis(i) for i in range(ndim) ] @@ -557,6 +556,7 @@ def _get_comb_axis(self, i: int) -> Index: copy=self.copy, ) + @cache_readonly def _get_concat_axis(self) -> Index: """ Return index to be used along concatenation axis.
Looks like this consolidation was added here https://github.com/pandas-dev/pandas/commit/3b1c5b74e0ca5a782d4c070aac002b3e6d7e5290 in 2012, no clear reason why it is needed. About to start an asv run.
https://api.github.com/repos/pandas-dev/pandas/pulls/34683
2020-06-10T01:52:28Z
2020-12-17T21:05:28Z
2020-12-17T21:05:28Z
2020-12-17T21:48:07Z
CLN: dont consolidate in groupby
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 9838cff9b34f9..e385a78142ba5 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -497,7 +497,6 @@ def __init__( self._selection = selection assert isinstance(obj, NDFrame), type(obj) - obj._consolidate_inplace() self.level = level
full asv run looks like pure noise ``` before after ratio [03f5066c] [f2cae354] <master~3> <cln-consolidate-less-3> + 38.4±0.1μs 45.7±6μs 1.19 algorithms.SortIntegerArray.time_argsort(1000) + 11.5±0.2ms 13.3±0.4ms 1.16 groupby.Nth.time_series_nth('float64') + 7.09±0.02ms 8.02±0.04ms 1.13 io.hdf.HDFStoreDataFrame.time_query_store_table + 12.3±0.1μs 13.7±0.3μs 1.11 tslibs.timestamp.TimestampOps.time_replace_None('US/Eastern') + 3.96±0.03μs 4.36±0.04μs 1.10 period.Indexing.time_get_loc - 1.25±0ms 1.13±0ms 0.91 arithmetic.OffsetArrayArithmetic.time_add_series_offset(<DateOffset: days=2, months=2>) - 1.22±0ms 1.10±0ms 0.90 arithmetic.OffsetArrayArithmetic.time_add_dti_offset(<DateOffset: days=2, months=2>) - 1.54±0.1μs 1.39±0.06μs 0.90 index_cached_properties.IndexCache.time_is_all_dates('MultiIndex') - 12.9±0.5μs 11.6±0.5μs 0.90 tslibs.timedelta.TimedeltaConstructor.time_from_components - 3.75±0.06ms 3.35±0ms 0.89 rolling.ForwardWindowMethods.time_rolling('Series', 1000, 'float', 'min') - 1.22±0.01ms 1.08±0ms 0.89 arithmetic.ApplyIndex.time_apply_index(<DateOffset: days=2, months=2>) - 4.47±0.2μs 3.95±0.08μs 0.88 tslibs.timedelta.TimedeltaConstructor.time_from_datetime_timedelta - 12.2±0.06ms 10.7±0.04ms 0.88 groupby.MultiColumn.time_col_select_numpy_sum - 3.98±0.5μs 3.45±0.07μs 0.87 index_cached_properties.IndexCache.time_shape('DatetimeIndex') - 30.2±0.2ms 26.1±0.08ms 0.86 frame_methods.Dropna.time_dropna_axis_mixed_dtypes('any', 0) - 2.25±1μs 1.78±0.06μs 0.79 index_cached_properties.IndexCache.time_shape('UInt64Index') - 5.76±0.9μs 4.51±0.3μs 0.78 index_cached_properties.IndexCache.time_shape('IntervalIndex') - 2.56±0.5μs 1.99±0.2μs 0.77 index_cached_properties.IndexCache.time_values('IntervalIndex') - 3.67±0.02ms 2.66±0.1ms 0.73 rolling.ForwardWindowMethods.time_rolling('Series', 1000, 'int', 'min') - 4.34±1μs 2.95±0.2μs 0.68 index_cached_properties.IndexCache.time_shape('CategoricalIndex') - 2.48±2μs 1.65±0.09μs 0.66 
index_cached_properties.IndexCache.time_shape('Float64Index') - 3.04±1μs 1.96±0.1μs 0.64 index_cached_properties.IndexCache.time_values('CategoricalIndex') - 1.88±0.6μs 1.09±0.06μs 0.58 index_cached_properties.IndexCache.time_values('Float64Index') - 3.14±1μs 1.16±0.05μs 0.37 index_cached_properties.IndexCache.time_values('UInt64Index') ```
https://api.github.com/repos/pandas-dev/pandas/pulls/34680
2020-06-09T21:17:54Z
2020-06-09T22:21:37Z
2020-06-09T22:21:37Z
2020-06-10T07:47:26Z
CLN: dont consolidate in indexing
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 9c8b01003bece..4d5a9e54e7d37 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1676,7 +1676,6 @@ def isetter(loc, v): ser = v else: # set the item, possibly having a dtype change - ser._consolidate_inplace() ser = ser.copy() ser._mgr = ser._mgr.setitem(indexer=pi, value=v) ser._maybe_update_cacher(clear=True)
https://api.github.com/repos/pandas-dev/pandas/pulls/34679
2020-06-09T21:14:10Z
2020-06-26T23:59:12Z
2020-06-26T23:59:12Z
2020-06-27T00:21:15Z
CLN: dont consolidate in NDFrame._is_numeric_mixed_type
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 7c3e975c889e1..9014e576eeb39 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5331,15 +5331,10 @@ def _is_mixed_type(self) -> bool_t: f = lambda: self._mgr.is_mixed_type return self._protect_consolidate(f) - @property - def _is_numeric_mixed_type(self) -> bool_t: - f = lambda: self._mgr.is_numeric_mixed_type - return self._protect_consolidate(f) - def _check_inplace_setting(self, value) -> bool_t: """ check whether we allow in-place setting with this type of value """ if self._is_mixed_type: - if not self._is_numeric_mixed_type: + if not self._mgr.is_numeric_mixed_type: # allow an actual np.nan thru if is_float(value) and np.isnan(value):
https://api.github.com/repos/pandas-dev/pandas/pulls/34678
2020-06-09T21:09:45Z
2020-06-15T17:28:17Z
2020-06-15T17:28:17Z
2020-06-15T18:12:24Z
CLN: remove get_freq_code
diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py index 6dbb4ce7bc974..6f173a4542bb0 100644 --- a/pandas/_libs/tslibs/__init__.py +++ b/pandas/_libs/tslibs/__init__.py @@ -18,7 +18,7 @@ "to_offset", ] -from . import dtypes # type: ignore +from . import dtypes from .conversion import localize_pydatetime from .nattype import NaT, NaTType, iNaT, is_null_datetimelike, nat_strings from .np_datetime import OutOfBoundsDatetime diff --git a/pandas/_libs/tslibs/frequencies.pxd b/pandas/_libs/tslibs/frequencies.pxd index 098944c965df0..896eec77ef4fe 100644 --- a/pandas/_libs/tslibs/frequencies.pxd +++ b/pandas/_libs/tslibs/frequencies.pxd @@ -1,5 +1,3 @@ cdef dict attrname_to_abbrevs -cpdef get_freq_code(freqstr) cpdef int get_to_timestamp_base(int base) -cpdef str get_freq_str(base, mult=*) diff --git a/pandas/_libs/tslibs/frequencies.pyx b/pandas/_libs/tslibs/frequencies.pyx index 9ff34ef0b6f89..6e525500ec37a 100644 --- a/pandas/_libs/tslibs/frequencies.pyx +++ b/pandas/_libs/tslibs/frequencies.pyx @@ -1,18 +1,5 @@ -cimport numpy as cnp -cnp.import_array() -from pandas._libs.tslibs.util cimport is_integer_object - -from pandas._libs.tslibs.offsets cimport is_offset_object -from pandas._libs.tslibs.offsets import ( - INVALID_FREQ_ERR_MSG, - _dont_uppercase, - _lite_rule_alias, - base_and_stride, - opattern, -) - -from .dtypes import FreqGroup, _period_code_map, _reverse_period_code_map +from .dtypes import FreqGroup # --------------------------------------------------------------------- # Period codes @@ -36,131 +23,22 @@ cdef dict attrname_to_abbrevs = _attrname_to_abbrevs # ---------------------------------------------------------------------- -def get_freq_group(freq) -> int: +# TODO: this is now identical to the version in libperiod +def get_freq_group(freq: int) -> int: """ Return frequency code group of given frequency str or offset. 
Examples -------- - >>> get_freq_group('W-MON') + >>> get_freq_group(4001) 4000 - >>> get_freq_group('W-FRI') + >>> get_freq_group(4006) 4000 """ - if is_offset_object(freq): - freq = freq.rule_code - - if isinstance(freq, str): - freq = attrname_to_abbrevs.get(freq, freq) - base, mult = get_freq_code(freq) - freq = base - elif isinstance(freq, int): - pass - else: - raise ValueError('input must be str, offset or int') return (freq // 1000) * 1000 -cpdef get_freq_code(freqstr): - """ - Return freq str or tuple to freq code and stride (mult) - - Parameters - ---------- - freqstr : str or tuple - - Returns - ------- - return : tuple of base frequency code and stride (mult) - - Raises - ------ - TypeError : if passed a tuple witth incorrect types - - Examples - -------- - >>> get_freq_code('3D') - (6000, 3) - - >>> get_freq_code('D') - (6000, 1) - - >>> get_freq_code(('D', 3)) - (6000, 3) - """ - if is_offset_object(freqstr): - freqstr = (freqstr.rule_code, freqstr.n) - - if isinstance(freqstr, tuple): - if is_integer_object(freqstr[0]) and is_integer_object(freqstr[1]): - # e.g., freqstr = (2000, 1) - return freqstr - elif is_integer_object(freqstr[0]): - # Note: passing freqstr[1] below will raise TypeError if that - # is not a str - code = _period_str_to_code(freqstr[1]) - stride = freqstr[0] - return code, stride - else: - # e.g., freqstr = ('T', 5) - code = _period_str_to_code(freqstr[0]) - stride = freqstr[1] - return code, stride - - if is_integer_object(freqstr): - return freqstr, 1 - - base, stride = base_and_stride(freqstr) - code = _period_str_to_code(base) - - return code, stride - - -cpdef _period_str_to_code(str freqstr): - freqstr = _lite_rule_alias.get(freqstr, freqstr) - - if freqstr not in _dont_uppercase: - lower = freqstr.lower() - freqstr = _lite_rule_alias.get(lower, freqstr) - - if freqstr not in _dont_uppercase: - freqstr = freqstr.upper() - try: - return _period_code_map[freqstr] - except KeyError: - raise 
ValueError(INVALID_FREQ_ERR_MSG.format(freqstr)) - - -cpdef str get_freq_str(base, mult=1): - """ - Return the summary string associated with this offset code, possibly - adjusted by a multiplier. - - Parameters - ---------- - base : int (member of FreqGroup) - - Returns - ------- - freq_str : str - - Examples - -------- - >>> get_freq_str(1000) - 'A-DEC' - - >>> get_freq_str(2000, 2) - '2Q-DEC' - - >>> get_freq_str("foo") - """ - code = _reverse_period_code_map.get(base) - if mult == 1: - return code - return str(mult) + code - - cpdef int get_to_timestamp_base(int base): """ Return frequency code group used for base of to_timestamp against diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index c06f34e37ec49..47ebf139ed496 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -75,8 +75,6 @@ from pandas._libs.tslibs.dtypes cimport ( from pandas._libs.tslibs.frequencies cimport ( attrname_to_abbrevs, - get_freq_code, - get_freq_str, get_to_timestamp_base, ) from pandas._libs.tslibs.parsing cimport get_rule_month diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 8eb1bdadf9156..90513e355e732 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -11,8 +11,8 @@ Timestamp, conversion, fields, - frequencies as libfrequencies, iNaT, + offsets as liboffsets, resolution as libresolution, timezones, to_offset, @@ -1106,7 +1106,7 @@ def to_period(self, freq=None): # https://github.com/pandas-dev/pandas/issues/33358 if res is None: - base, stride = libfrequencies.base_and_stride(freq) + base, stride = liboffsets.base_and_stride(freq) res = f"{stride}{base}" freq = res diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 43dfd94b49215..49cb78340d104 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -503,7 +503,7 @@ def get_loc(self, key, method=None, tolerance=None): reso = 
Resolution.from_attrname(reso) grp = reso.freq_group - freqn = get_freq_group(self.freq) + freqn = get_freq_group(self.dtype.dtype_code) # _get_string_slice will handle cases where grp < freqn assert grp >= freqn @@ -579,7 +579,7 @@ def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime): def _validate_partial_date_slice(self, reso: Resolution): assert isinstance(reso, Resolution), (type(reso), reso) grp = reso.freq_group - freqn = get_freq_group(self.freq) + freqn = get_freq_group(self.dtype.dtype_code) if not grp < freqn: # TODO: we used to also check for diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py index a9cca32271b9f..99fc730e818c4 100644 --- a/pandas/plotting/_matplotlib/timeseries.py +++ b/pandas/plotting/_matplotlib/timeseries.py @@ -6,7 +6,8 @@ import numpy as np from pandas._libs.tslibs import Period, to_offset -from pandas._libs.tslibs.frequencies import FreqGroup, base_and_stride +from pandas._libs.tslibs.frequencies import FreqGroup +from pandas._libs.tslibs.offsets import base_and_stride from pandas._typing import FrameOrSeriesUnion from pandas.core.dtypes.generic import ( diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py index 23dedf6f86a09..0d39e034905d2 100644 --- a/pandas/tests/indexes/datetimes/test_scalar_compat.py +++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py @@ -7,6 +7,7 @@ import pytest from pandas._libs.tslibs import OutOfBoundsDatetime, to_offset +from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG import pandas as pd from pandas import DatetimeIndex, Timestamp, date_range @@ -118,7 +119,7 @@ def test_round(self, tz_naive_fixture): tm.assert_index_equal(rng.round(freq="H"), expected_rng) assert elt.round(freq="H") == expected_elt - msg = pd._libs.tslibs.frequencies.INVALID_FREQ_ERR_MSG + msg = INVALID_FREQ_ERR_MSG with pytest.raises(ValueError, match=msg): rng.round(freq="foo") with 
pytest.raises(ValueError, match=msg): diff --git a/pandas/tests/indexes/datetimes/test_to_period.py b/pandas/tests/indexes/datetimes/test_to_period.py index d82fc1ef6743b..51cc6af2eed08 100644 --- a/pandas/tests/indexes/datetimes/test_to_period.py +++ b/pandas/tests/indexes/datetimes/test_to_period.py @@ -6,7 +6,7 @@ import pytz from pandas._libs.tslibs.ccalendar import MONTHS -from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG +from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG from pandas import ( DatetimeIndex, diff --git a/pandas/tests/indexes/timedeltas/test_scalar_compat.py b/pandas/tests/indexes/timedeltas/test_scalar_compat.py index 1b86cd1df5a7a..16c19b8d00380 100644 --- a/pandas/tests/indexes/timedeltas/test_scalar_compat.py +++ b/pandas/tests/indexes/timedeltas/test_scalar_compat.py @@ -5,6 +5,8 @@ import numpy as np import pytest +from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG + import pandas as pd from pandas import Index, Series, Timedelta, TimedeltaIndex, timedelta_range import pandas._testing as tm @@ -58,7 +60,7 @@ def test_tdi_round(self): tm.assert_index_equal(td.round(freq="H"), expected_rng) assert elt.round(freq="H") == expected_elt - msg = pd._libs.tslibs.frequencies.INVALID_FREQ_ERR_MSG + msg = INVALID_FREQ_ERR_MSG with pytest.raises(ValueError, match=msg): td.round(freq="foo") with pytest.raises(ValueError, match=msg): diff --git a/pandas/tests/scalar/period/test_asfreq.py b/pandas/tests/scalar/period/test_asfreq.py index b9f637c178d53..56281521deb90 100644 --- a/pandas/tests/scalar/period/test_asfreq.py +++ b/pandas/tests/scalar/period/test_asfreq.py @@ -1,6 +1,7 @@ import pytest -from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG, _period_code_map +from pandas._libs.tslibs.dtypes import _period_code_map +from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG from pandas.errors import OutOfBoundsDatetime from pandas import Period, Timestamp, offsets diff --git 
a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index 3e769b577582a..702899f163e06 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -6,9 +6,8 @@ from pandas._libs.tslibs import iNaT, period as libperiod from pandas._libs.tslibs.ccalendar import DAYS, MONTHS -from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG from pandas._libs.tslibs.parsing import DateParseError -from pandas._libs.tslibs.period import IncompatibleFrequency +from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG, IncompatibleFrequency from pandas._libs.tslibs.timezones import dateutil_gettz, maybe_get_tz from pandas.compat.numpy import np_datetime64_compat diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py index 388ff4ea039be..8641bbd0a66f2 100644 --- a/pandas/tests/scalar/timestamp/test_unary_ops.py +++ b/pandas/tests/scalar/timestamp/test_unary_ops.py @@ -6,7 +6,7 @@ from pytz import utc from pandas._libs.tslibs import NaT, Timestamp, conversion, to_offset -from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG +from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG import pandas.util._test_decorators as td import pandas._testing as tm diff --git a/pandas/tests/tseries/frequencies/test_freq_code.py b/pandas/tests/tseries/frequencies/test_freq_code.py index f0ff449d902d0..189a0cc2171ad 100644 --- a/pandas/tests/tseries/frequencies/test_freq_code.py +++ b/pandas/tests/tseries/frequencies/test_freq_code.py @@ -1,80 +1,7 @@ import pytest -from pandas._libs.tslibs import Resolution, offsets, to_offset -from pandas._libs.tslibs.frequencies import ( - FreqGroup, - _attrname_to_abbrevs, - _period_code_map, - get_freq_code, - get_freq_group, - get_to_timestamp_base, -) - - -@pytest.fixture(params=list(_period_code_map.items())) -def period_code_item(request): - return request.param - - -@pytest.mark.parametrize( - 
"freqstr,expected", - [ - ("A", 1000), - ("3A", 1000), - ("-1A", 1000), - ("Y", 1000), - ("3Y", 1000), - ("-1Y", 1000), - ("W", 4000), - ("W-MON", 4001), - ("W-FRI", 4005), - ], -) -def test_freq_code(freqstr, expected): - assert get_freq_code(freqstr)[0] == expected - - -def test_freq_code_match(period_code_item): - freqstr, code = period_code_item - assert get_freq_code(freqstr)[0] == code - - -@pytest.mark.parametrize( - "freqstr,expected", - [ - ("A", 1000), - ("3A", 1000), - ("-1A", 1000), - ("A-JAN", 1000), - ("A-MAY", 1000), - ("Y", 1000), - ("3Y", 1000), - ("-1Y", 1000), - ("Y-JAN", 1000), - ("Y-MAY", 1000), - (offsets.YearEnd(), 1000), - (offsets.YearEnd(month=1), 1000), - (offsets.YearEnd(month=5), 1000), - ("W", 4000), - ("W-MON", 4000), - ("W-FRI", 4000), - (offsets.Week(), 4000), - (offsets.Week(weekday=1), 4000), - (offsets.Week(weekday=5), 4000), - ("T", FreqGroup.FR_MIN), - ], -) -def test_freq_group(freqstr, expected): - assert get_freq_group(freqstr) == expected - - -def test_freq_group_match(period_code_item): - freqstr, code = period_code_item - - str_group = get_freq_group(freqstr) - code_group = get_freq_group(code) - - assert str_group == code_group == code // 1000 * 1000 +from pandas._libs.tslibs import Resolution, to_offset +from pandas._libs.tslibs.frequencies import _attrname_to_abbrevs, get_to_timestamp_base @pytest.mark.parametrize( @@ -82,9 +9,9 @@ def test_freq_group_match(period_code_item): [("D", "D"), ("W", "D"), ("M", "D"), ("S", "S"), ("T", "S"), ("H", "S")], ) def test_get_to_timestamp_base(freqstr, exp_freqstr): - tsb = get_to_timestamp_base - - assert tsb(get_freq_code(freqstr)[0]) == get_freq_code(exp_freqstr)[0] + left_code = to_offset(freqstr)._period_dtype_code + exp_code = to_offset(exp_freqstr)._period_dtype_code + assert get_to_timestamp_base(left_code) == exp_code @pytest.mark.parametrize( @@ -144,43 +71,3 @@ def test_cat(args): with pytest.raises(ValueError, match=msg): to_offset(str(args[0]) + args[1]) - - 
-@pytest.mark.parametrize( - "freq_input,expected", - [ - # Frequency string. - ("A", (get_freq_code("A")[0], 1)), - ("3D", (get_freq_code("D")[0], 3)), - ("-2M", (get_freq_code("M")[0], -2)), - # Tuple. - (("D", 1), (get_freq_code("D")[0], 1)), - (("A", 3), (get_freq_code("A")[0], 3)), - (("M", -2), (get_freq_code("M")[0], -2)), - ((5, "T"), (FreqGroup.FR_MIN, 5)), - # Numeric Tuple. - ((1000, 1), (1000, 1)), - # Offsets. - (offsets.Day(), (get_freq_code("D")[0], 1)), - (offsets.Day(3), (get_freq_code("D")[0], 3)), - (offsets.Day(-2), (get_freq_code("D")[0], -2)), - (offsets.MonthEnd(), (get_freq_code("M")[0], 1)), - (offsets.MonthEnd(3), (get_freq_code("M")[0], 3)), - (offsets.MonthEnd(-2), (get_freq_code("M")[0], -2)), - (offsets.Week(), (get_freq_code("W")[0], 1)), - (offsets.Week(3), (get_freq_code("W")[0], 3)), - (offsets.Week(-2), (get_freq_code("W")[0], -2)), - (offsets.Hour(), (FreqGroup.FR_HR, 1)), - # Monday is weekday=0. - (offsets.Week(weekday=1), (get_freq_code("W-TUE")[0], 1)), - (offsets.Week(3, weekday=0), (get_freq_code("W-MON")[0], 3)), - (offsets.Week(-2, weekday=4), (get_freq_code("W-FRI")[0], -2)), - ], -) -def test_get_freq_code(freq_input, expected): - assert get_freq_code(freq_input) == expected - - -def test_get_code_invalid(): - with pytest.raises(ValueError, match="Invalid frequency"): - get_freq_code((5, "baz")) diff --git a/pandas/tests/tseries/frequencies/test_inference.py b/pandas/tests/tseries/frequencies/test_inference.py index c32ad5087ab9e..95edd038dab9b 100644 --- a/pandas/tests/tseries/frequencies/test_inference.py +++ b/pandas/tests/tseries/frequencies/test_inference.py @@ -4,7 +4,7 @@ import pytest from pandas._libs.tslibs.ccalendar import DAYS, MONTHS -from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG +from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG from pandas.compat import is_platform_windows from pandas import DatetimeIndex, Index, Series, Timestamp, date_range, period_range diff --git 
a/pandas/tests/tseries/offsets/test_fiscal.py b/pandas/tests/tseries/offsets/test_fiscal.py index f0ce104a68e29..7713be67a7e05 100644 --- a/pandas/tests/tseries/offsets/test_fiscal.py +++ b/pandas/tests/tseries/offsets/test_fiscal.py @@ -6,7 +6,7 @@ from dateutil.relativedelta import relativedelta import pytest -from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG +from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG from pandas import Timestamp import pandas._testing as tm diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index e3a89d9ed57a6..784c04f225630 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -11,13 +11,9 @@ conversion, timezones, ) -from pandas._libs.tslibs.frequencies import ( - INVALID_FREQ_ERR_MSG, - get_freq_code, - get_freq_str, -) import pandas._libs.tslibs.offsets as liboffsets from pandas._libs.tslibs.offsets import ApplyTypeError, _get_offset, _offset_map +from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG import pandas.compat as compat from pandas.compat.numpy import np_datetime64_compat from pandas.errors import PerformanceWarning @@ -4112,13 +4108,6 @@ def test_rule_code(self): assert alias == _get_offset(alias).rule_code assert alias == (_get_offset(alias) * 5).rule_code - lst = ["M", "D", "B", "H", "T", "S", "L", "U"] - for k in lst: - code, stride = get_freq_code("3" + k) - assert isinstance(code, int) - assert stride == 3 - assert k == get_freq_str(code) - def test_dateoffset_misc(): oset = offsets.DateOffset(months=2, days=4) diff --git a/pandas/tests/tslibs/test_libfrequencies.py b/pandas/tests/tslibs/test_libfrequencies.py index feaaaf6adca6f..993f2f4c8ef10 100644 --- a/pandas/tests/tslibs/test_libfrequencies.py +++ b/pandas/tests/tslibs/test_libfrequencies.py @@ -1,6 +1,5 @@ import pytest -from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG, _period_str_to_code from 
pandas._libs.tslibs.parsing import get_rule_month from pandas.tseries import offsets @@ -28,51 +27,3 @@ def test_get_rule_month(obj, expected): result = get_rule_month(obj) assert result == expected - - -@pytest.mark.parametrize( - "obj,expected", - [ - ("A", 1000), - ("A-DEC", 1000), - ("A-JAN", 1001), - ("Y", 1000), - ("Y-DEC", 1000), - ("Y-JAN", 1001), - ("Q", 2000), - ("Q-DEC", 2000), - ("Q-FEB", 2002), - ("W", 4000), - ("W-SUN", 4000), - ("W-FRI", 4005), - ("Min", 8000), - ("ms", 10000), - ("US", 11000), - ("NS", 12000), - ], -) -def test_period_str_to_code(obj, expected): - assert _period_str_to_code(obj) == expected - - -@pytest.mark.parametrize( - "freq,expected,aliases", - [ - ("D", 6000, ["DAY", "DLY", "DAILY"]), - ("M", 3000, ["MTH", "MONTH", "MONTHLY"]), - ("N", 12000, ["NANOSECOND", "NANOSECONDLY"]), - ("H", 7000, ["HR", "HOUR", "HRLY", "HOURLY"]), - ("T", 8000, ["minute", "MINUTE", "MINUTELY"]), - ("L", 10000, ["MILLISECOND", "MILLISECONDLY"]), - ("U", 11000, ["MICROSECOND", "MICROSECONDLY"]), - ("S", 9000, ["sec", "SEC", "SECOND", "SECONDLY"]), - ("B", 5000, ["BUS", "BUSINESS", "BUSINESSLY", "WEEKDAY"]), - ], -) -def test_assert_aliases_deprecated(freq, expected, aliases): - assert isinstance(aliases, list) - assert _period_str_to_code(freq) == expected - - for alias in aliases: - with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG): - _period_str_to_code(alias) diff --git a/pandas/tests/tslibs/test_period_asfreq.py b/pandas/tests/tslibs/test_period_asfreq.py index 7205c3cc676cf..63298b657e341 100644 --- a/pandas/tests/tslibs/test_period_asfreq.py +++ b/pandas/tests/tslibs/test_period_asfreq.py @@ -1,9 +1,15 @@ import pytest -from pandas._libs.tslibs.frequencies import get_freq_code +from pandas._libs.tslibs import to_offset from pandas._libs.tslibs.period import period_asfreq, period_ordinal +def get_freq_code(freqstr: str) -> int: + off = to_offset(freqstr) + code = off._period_dtype_code + return code + + @pytest.mark.parametrize( 
"freq1,freq2,expected", [ @@ -32,8 +38,7 @@ ) def test_intra_day_conversion_factors(freq1, freq2, expected): assert ( - period_asfreq(1, get_freq_code(freq1)[0], get_freq_code(freq2)[0], False) - == expected + period_asfreq(1, get_freq_code(freq1), get_freq_code(freq2), False) == expected ) @@ -42,7 +47,7 @@ def test_intra_day_conversion_factors(freq1, freq2, expected): ) def test_period_ordinal_start_values(freq, expected): # information for Jan. 1, 1970. - assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq_code(freq)[0]) == expected + assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq_code(freq)) == expected @pytest.mark.parametrize( @@ -55,7 +60,7 @@ def test_period_ordinal_start_values(freq, expected): ], ) def test_period_ordinal_week(dt, expected): - args = dt + (get_freq_code("W")[0],) + args = dt + (get_freq_code("W"),) assert period_ordinal(*args) == expected @@ -77,5 +82,6 @@ def test_period_ordinal_week(dt, expected): ], ) def test_period_ordinal_business_day(day, expected): - args = (2013, 10, day, 0, 0, 0, 0, 0, get_freq_code("B")[0]) + # 5000 is PeriodDtypeCode for BusinessDay + args = (2013, 10, day, 0, 0, 0, 0, 0, 5000) assert period_ordinal(*args) == expected
Sits on top of #34587
https://api.github.com/repos/pandas-dev/pandas/pulls/34674
2020-06-09T18:24:02Z
2020-06-10T01:05:06Z
2020-06-10T01:05:06Z
2020-06-10T01:39:11Z
PERF: normalize_i8_timestamps
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index b0bad119d6a46..152e9a5ad7ddc 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -785,7 +785,6 @@ cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo t int64_t[:] deltas str typ Py_ssize_t[:] pos - npy_datetimestruct dts int64_t delta, local_val if tz is None or is_utc(tz): @@ -795,16 +794,14 @@ cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo t result[i] = NPY_NAT continue local_val = stamps[i] - dt64_to_dtstruct(local_val, &dts) - result[i] = _normalized_stamp(&dts) + result[i] = _normalize_i8_stamp(local_val) elif is_tzlocal(tz): for i in range(n): if stamps[i] == NPY_NAT: result[i] = NPY_NAT continue local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) - dt64_to_dtstruct(local_val, &dts) - result[i] = _normalized_stamp(&dts) + result[i] = _normalize_i8_stamp(local_val) else: # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) @@ -816,38 +813,36 @@ cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo t if stamps[i] == NPY_NAT: result[i] = NPY_NAT continue - dt64_to_dtstruct(stamps[i] + delta, &dts) - result[i] = _normalized_stamp(&dts) + local_val = stamps[i] + delta + result[i] = _normalize_i8_stamp(local_val) else: pos = trans.searchsorted(stamps, side='right') - 1 for i in range(n): if stamps[i] == NPY_NAT: result[i] = NPY_NAT continue - dt64_to_dtstruct(stamps[i] + deltas[pos[i]], &dts) - result[i] = _normalized_stamp(&dts) + local_val = stamps[i] + deltas[pos[i]] + result[i] = _normalize_i8_stamp(local_val) return result.base # `.base` to access underlying ndarray -cdef inline int64_t _normalized_stamp(npy_datetimestruct *dts) nogil: +@cython.cdivision +cdef inline int64_t _normalize_i8_stamp(int64_t local_val) nogil: """ - Normalize the given datetimestruct to midnight, then convert to int64_t. 
+ Round the localized nanosecond timestamp down to the previous midnight. Parameters ---------- - *dts : pointer to npy_datetimestruct + local_val : int64_t Returns ------- - stamp : int64 - """ - dts.hour = 0 - dts.min = 0 - dts.sec = 0 - dts.us = 0 - dts.ps = 0 - return dtstruct_to_dt64(dts) + int64_t + """ + cdef: + int64_t day_nanos = 24 * 3600 * 1_000_000_000 + return local_val - (local_val % day_nanos) @cython.wraparound(False)
xref #34507 Based on the asv mentioned in that thread: ``` In [3]: N = 10**5 In [4]: tz = "UTC" In [5]: series = Series(date_range(start="1/1/2000", periods=N, freq="T", tz=tz)) In [6]: %timeit series.dt.normalize() 947 µs ± 4.33 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) # <-- PR 3.26 ms ± 43.8 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) # <-- master ```
https://api.github.com/repos/pandas-dev/pandas/pulls/34672
2020-06-09T16:47:38Z
2020-06-09T17:59:27Z
2020-06-09T17:59:27Z
2020-06-09T18:03:23Z
BLD: pyproject.toml for Py38
diff --git a/doc/source/whatsnew/v1.0.5.rst b/doc/source/whatsnew/v1.0.5.rst index 1edc7e1cad72f..5dbc911407784 100644 --- a/doc/source/whatsnew/v1.0.5.rst +++ b/doc/source/whatsnew/v1.0.5.rst @@ -22,7 +22,8 @@ Fixed regressions Bug fixes ~~~~~~~~~ -- + +- Fixed building from source with Python 3.8 fetching the wrong version of NumPy (:issue:`34666`) - Contributors diff --git a/pyproject.toml b/pyproject.toml index efeb24edbdeb1..aaebcff8e4c1e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,9 +6,11 @@ requires = [ "wheel", "Cython>=0.29.16", # Note: sync with setup.py "numpy==1.15.4; python_version=='3.6' and platform_system!='AIX'", - "numpy==1.15.4; python_version>='3.7' and platform_system!='AIX'", + "numpy==1.15.4; python_version=='3.7' and platform_system!='AIX'", + "numpy==1.17.3; python_version>='3.8' and platform_system!='AIX'", "numpy==1.16.0; python_version=='3.6' and platform_system=='AIX'", - "numpy==1.16.0; python_version>='3.7' and platform_system=='AIX'", + "numpy==1.16.0; python_version=='3.7' and platform_system=='AIX'", + "numpy==1.17.3; python_version>='3.8' and platform_system=='AIX'", ] [tool.black]
Closes https://github.com/pandas-dev/pandas/issues/34666
https://api.github.com/repos/pandas-dev/pandas/pulls/34667
2020-06-09T14:29:53Z
2020-06-11T15:29:02Z
2020-06-11T15:29:02Z
2020-06-15T09:17:23Z
Backport PR #34458 on branch 1.0.x (CLN: Clean csv files in test data GH34427)
diff --git a/doc/source/getting_started/comparison/comparison_with_sas.rst b/doc/source/getting_started/comparison/comparison_with_sas.rst index 4e284fe7b5968..a29c9ec1c3842 100644 --- a/doc/source/getting_started/comparison/comparison_with_sas.rst +++ b/doc/source/getting_started/comparison/comparison_with_sas.rst @@ -115,7 +115,7 @@ Reading external data Like SAS, pandas provides utilities for reading in data from many formats. The ``tips`` dataset, found within the pandas -tests (`csv <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/tips.csv>`_) +tests (`csv <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/io/data/csv/tips.csv>`_) will be used in many of the following examples. SAS provides ``PROC IMPORT`` to read csv data into a data set. @@ -131,7 +131,7 @@ The pandas method is :func:`read_csv`, which works similarly. .. ipython:: python url = ('https://raw.github.com/pandas-dev/' - 'pandas/master/pandas/tests/data/tips.csv') + 'pandas/master/pandas/tests/io/data/csv/tips.csv') tips = pd.read_csv(url) tips.head() diff --git a/doc/source/getting_started/comparison/comparison_with_sql.rst b/doc/source/getting_started/comparison/comparison_with_sql.rst index 6a03c06de3699..2efd3b1854491 100644 --- a/doc/source/getting_started/comparison/comparison_with_sql.rst +++ b/doc/source/getting_started/comparison/comparison_with_sql.rst @@ -25,7 +25,7 @@ structure. .. 
ipython:: python url = ('https://raw.github.com/pandas-dev' - '/pandas/master/pandas/tests/data/tips.csv') + '/pandas/master/pandas/tests/io/data/csv/tips.csv') tips = pd.read_csv(url) tips.head() diff --git a/doc/source/getting_started/comparison/comparison_with_stata.rst b/doc/source/getting_started/comparison/comparison_with_stata.rst index fec6bae1e0330..31cb76eea5b46 100644 --- a/doc/source/getting_started/comparison/comparison_with_stata.rst +++ b/doc/source/getting_started/comparison/comparison_with_stata.rst @@ -112,7 +112,7 @@ Reading external data Like Stata, pandas provides utilities for reading in data from many formats. The ``tips`` data set, found within the pandas -tests (`csv <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/tips.csv>`_) +tests (`csv <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/io/data/csv/tips.csv>`_) will be used in many of the following examples. Stata provides ``import delimited`` to read csv data into a data set in memory. @@ -128,7 +128,7 @@ the data set if presented with a url. .. ipython:: python url = ('https://raw.github.com/pandas-dev' - '/pandas/master/pandas/tests/data/tips.csv') + '/pandas/master/pandas/tests/io/data/csv/tips.csv') tips = pd.read_csv(url) tips.head() diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst index 39051440e9d9a..4fde053a2070f 100644 --- a/doc/source/user_guide/visualization.rst +++ b/doc/source/user_guide/visualization.rst @@ -865,7 +865,7 @@ for more information. By coloring these curves differently for each class it is possible to visualize data clustering. Curves belonging to samples of the same class will usually be closer together and form larger structures. -**Note**: The "Iris" dataset is available `here <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/iris.csv>`__. 
+**Note**: The "Iris" dataset is available `here <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/io/data/csv/iris.csv>`__. .. ipython:: python @@ -1025,7 +1025,7 @@ be colored differently. See the R package `Radviz <https://cran.r-project.org/package=Radviz/>`__ for more information. -**Note**: The "Iris" dataset is available `here <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/iris.csv>`__. +**Note**: The "Iris" dataset is available `here <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/io/data/csv/iris.csv>`__. .. ipython:: python diff --git a/pandas/conftest.py b/pandas/conftest.py index 9eb2db19064c1..0b5efdc654cb5 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -401,7 +401,7 @@ def iris(datapath): """ The iris dataset as a DataFrame. """ - return pd.read_csv(datapath("data", "iris.csv")) + return pd.read_csv(datapath("io", "data", "csv", "iris.csv")) @pytest.fixture(params=["nlargest", "nsmallest"]) diff --git a/pandas/tests/data/iris.csv b/pandas/tests/data/iris.csv deleted file mode 100644 index c19b9c3688515..0000000000000 --- a/pandas/tests/data/iris.csv +++ /dev/null @@ -1,151 +0,0 @@ -SepalLength,SepalWidth,PetalLength,PetalWidth,Name -5.1,3.5,1.4,0.2,Iris-setosa -4.9,3.0,1.4,0.2,Iris-setosa -4.7,3.2,1.3,0.2,Iris-setosa -4.6,3.1,1.5,0.2,Iris-setosa -5.0,3.6,1.4,0.2,Iris-setosa -5.4,3.9,1.7,0.4,Iris-setosa -4.6,3.4,1.4,0.3,Iris-setosa -5.0,3.4,1.5,0.2,Iris-setosa -4.4,2.9,1.4,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -5.4,3.7,1.5,0.2,Iris-setosa -4.8,3.4,1.6,0.2,Iris-setosa -4.8,3.0,1.4,0.1,Iris-setosa -4.3,3.0,1.1,0.1,Iris-setosa -5.8,4.0,1.2,0.2,Iris-setosa -5.7,4.4,1.5,0.4,Iris-setosa -5.4,3.9,1.3,0.4,Iris-setosa -5.1,3.5,1.4,0.3,Iris-setosa -5.7,3.8,1.7,0.3,Iris-setosa -5.1,3.8,1.5,0.3,Iris-setosa -5.4,3.4,1.7,0.2,Iris-setosa -5.1,3.7,1.5,0.4,Iris-setosa -4.6,3.6,1.0,0.2,Iris-setosa -5.1,3.3,1.7,0.5,Iris-setosa -4.8,3.4,1.9,0.2,Iris-setosa -5.0,3.0,1.6,0.2,Iris-setosa 
-5.0,3.4,1.6,0.4,Iris-setosa -5.2,3.5,1.5,0.2,Iris-setosa -5.2,3.4,1.4,0.2,Iris-setosa -4.7,3.2,1.6,0.2,Iris-setosa -4.8,3.1,1.6,0.2,Iris-setosa -5.4,3.4,1.5,0.4,Iris-setosa -5.2,4.1,1.5,0.1,Iris-setosa -5.5,4.2,1.4,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -5.0,3.2,1.2,0.2,Iris-setosa -5.5,3.5,1.3,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -4.4,3.0,1.3,0.2,Iris-setosa -5.1,3.4,1.5,0.2,Iris-setosa -5.0,3.5,1.3,0.3,Iris-setosa -4.5,2.3,1.3,0.3,Iris-setosa -4.4,3.2,1.3,0.2,Iris-setosa -5.0,3.5,1.6,0.6,Iris-setosa -5.1,3.8,1.9,0.4,Iris-setosa -4.8,3.0,1.4,0.3,Iris-setosa -5.1,3.8,1.6,0.2,Iris-setosa -4.6,3.2,1.4,0.2,Iris-setosa -5.3,3.7,1.5,0.2,Iris-setosa -5.0,3.3,1.4,0.2,Iris-setosa -7.0,3.2,4.7,1.4,Iris-versicolor -6.4,3.2,4.5,1.5,Iris-versicolor -6.9,3.1,4.9,1.5,Iris-versicolor -5.5,2.3,4.0,1.3,Iris-versicolor -6.5,2.8,4.6,1.5,Iris-versicolor -5.7,2.8,4.5,1.3,Iris-versicolor -6.3,3.3,4.7,1.6,Iris-versicolor -4.9,2.4,3.3,1.0,Iris-versicolor -6.6,2.9,4.6,1.3,Iris-versicolor -5.2,2.7,3.9,1.4,Iris-versicolor -5.0,2.0,3.5,1.0,Iris-versicolor -5.9,3.0,4.2,1.5,Iris-versicolor -6.0,2.2,4.0,1.0,Iris-versicolor -6.1,2.9,4.7,1.4,Iris-versicolor -5.6,2.9,3.6,1.3,Iris-versicolor -6.7,3.1,4.4,1.4,Iris-versicolor -5.6,3.0,4.5,1.5,Iris-versicolor -5.8,2.7,4.1,1.0,Iris-versicolor -6.2,2.2,4.5,1.5,Iris-versicolor -5.6,2.5,3.9,1.1,Iris-versicolor -5.9,3.2,4.8,1.8,Iris-versicolor -6.1,2.8,4.0,1.3,Iris-versicolor -6.3,2.5,4.9,1.5,Iris-versicolor -6.1,2.8,4.7,1.2,Iris-versicolor -6.4,2.9,4.3,1.3,Iris-versicolor -6.6,3.0,4.4,1.4,Iris-versicolor -6.8,2.8,4.8,1.4,Iris-versicolor -6.7,3.0,5.0,1.7,Iris-versicolor -6.0,2.9,4.5,1.5,Iris-versicolor -5.7,2.6,3.5,1.0,Iris-versicolor -5.5,2.4,3.8,1.1,Iris-versicolor -5.5,2.4,3.7,1.0,Iris-versicolor -5.8,2.7,3.9,1.2,Iris-versicolor -6.0,2.7,5.1,1.6,Iris-versicolor -5.4,3.0,4.5,1.5,Iris-versicolor -6.0,3.4,4.5,1.6,Iris-versicolor -6.7,3.1,4.7,1.5,Iris-versicolor -6.3,2.3,4.4,1.3,Iris-versicolor -5.6,3.0,4.1,1.3,Iris-versicolor 
-5.5,2.5,4.0,1.3,Iris-versicolor -5.5,2.6,4.4,1.2,Iris-versicolor -6.1,3.0,4.6,1.4,Iris-versicolor -5.8,2.6,4.0,1.2,Iris-versicolor -5.0,2.3,3.3,1.0,Iris-versicolor -5.6,2.7,4.2,1.3,Iris-versicolor -5.7,3.0,4.2,1.2,Iris-versicolor -5.7,2.9,4.2,1.3,Iris-versicolor -6.2,2.9,4.3,1.3,Iris-versicolor -5.1,2.5,3.0,1.1,Iris-versicolor -5.7,2.8,4.1,1.3,Iris-versicolor -6.3,3.3,6.0,2.5,Iris-virginica -5.8,2.7,5.1,1.9,Iris-virginica -7.1,3.0,5.9,2.1,Iris-virginica -6.3,2.9,5.6,1.8,Iris-virginica -6.5,3.0,5.8,2.2,Iris-virginica -7.6,3.0,6.6,2.1,Iris-virginica -4.9,2.5,4.5,1.7,Iris-virginica -7.3,2.9,6.3,1.8,Iris-virginica -6.7,2.5,5.8,1.8,Iris-virginica -7.2,3.6,6.1,2.5,Iris-virginica -6.5,3.2,5.1,2.0,Iris-virginica -6.4,2.7,5.3,1.9,Iris-virginica -6.8,3.0,5.5,2.1,Iris-virginica -5.7,2.5,5.0,2.0,Iris-virginica -5.8,2.8,5.1,2.4,Iris-virginica -6.4,3.2,5.3,2.3,Iris-virginica -6.5,3.0,5.5,1.8,Iris-virginica -7.7,3.8,6.7,2.2,Iris-virginica -7.7,2.6,6.9,2.3,Iris-virginica -6.0,2.2,5.0,1.5,Iris-virginica -6.9,3.2,5.7,2.3,Iris-virginica -5.6,2.8,4.9,2.0,Iris-virginica -7.7,2.8,6.7,2.0,Iris-virginica -6.3,2.7,4.9,1.8,Iris-virginica -6.7,3.3,5.7,2.1,Iris-virginica -7.2,3.2,6.0,1.8,Iris-virginica -6.2,2.8,4.8,1.8,Iris-virginica -6.1,3.0,4.9,1.8,Iris-virginica -6.4,2.8,5.6,2.1,Iris-virginica -7.2,3.0,5.8,1.6,Iris-virginica -7.4,2.8,6.1,1.9,Iris-virginica -7.9,3.8,6.4,2.0,Iris-virginica -6.4,2.8,5.6,2.2,Iris-virginica -6.3,2.8,5.1,1.5,Iris-virginica -6.1,2.6,5.6,1.4,Iris-virginica -7.7,3.0,6.1,2.3,Iris-virginica -6.3,3.4,5.6,2.4,Iris-virginica -6.4,3.1,5.5,1.8,Iris-virginica -6.0,3.0,4.8,1.8,Iris-virginica -6.9,3.1,5.4,2.1,Iris-virginica -6.7,3.1,5.6,2.4,Iris-virginica -6.9,3.1,5.1,2.3,Iris-virginica -5.8,2.7,5.1,1.9,Iris-virginica -6.8,3.2,5.9,2.3,Iris-virginica -6.7,3.3,5.7,2.5,Iris-virginica -6.7,3.0,5.2,2.3,Iris-virginica -6.3,2.5,5.0,1.9,Iris-virginica -6.5,3.0,5.2,2.0,Iris-virginica -6.2,3.4,5.4,2.3,Iris-virginica -5.9,3.0,5.1,1.8,Iris-virginica \ No newline at end of file diff 
--git a/pandas/tests/data/tips.csv b/pandas/tests/data/tips.csv deleted file mode 100644 index 856a65a69e647..0000000000000 --- a/pandas/tests/data/tips.csv +++ /dev/null @@ -1,245 +0,0 @@ -total_bill,tip,sex,smoker,day,time,size -16.99,1.01,Female,No,Sun,Dinner,2 -10.34,1.66,Male,No,Sun,Dinner,3 -21.01,3.5,Male,No,Sun,Dinner,3 -23.68,3.31,Male,No,Sun,Dinner,2 -24.59,3.61,Female,No,Sun,Dinner,4 -25.29,4.71,Male,No,Sun,Dinner,4 -8.77,2.0,Male,No,Sun,Dinner,2 -26.88,3.12,Male,No,Sun,Dinner,4 -15.04,1.96,Male,No,Sun,Dinner,2 -14.78,3.23,Male,No,Sun,Dinner,2 -10.27,1.71,Male,No,Sun,Dinner,2 -35.26,5.0,Female,No,Sun,Dinner,4 -15.42,1.57,Male,No,Sun,Dinner,2 -18.43,3.0,Male,No,Sun,Dinner,4 -14.83,3.02,Female,No,Sun,Dinner,2 -21.58,3.92,Male,No,Sun,Dinner,2 -10.33,1.67,Female,No,Sun,Dinner,3 -16.29,3.71,Male,No,Sun,Dinner,3 -16.97,3.5,Female,No,Sun,Dinner,3 -20.65,3.35,Male,No,Sat,Dinner,3 -17.92,4.08,Male,No,Sat,Dinner,2 -20.29,2.75,Female,No,Sat,Dinner,2 -15.77,2.23,Female,No,Sat,Dinner,2 -39.42,7.58,Male,No,Sat,Dinner,4 -19.82,3.18,Male,No,Sat,Dinner,2 -17.81,2.34,Male,No,Sat,Dinner,4 -13.37,2.0,Male,No,Sat,Dinner,2 -12.69,2.0,Male,No,Sat,Dinner,2 -21.7,4.3,Male,No,Sat,Dinner,2 -19.65,3.0,Female,No,Sat,Dinner,2 -9.55,1.45,Male,No,Sat,Dinner,2 -18.35,2.5,Male,No,Sat,Dinner,4 -15.06,3.0,Female,No,Sat,Dinner,2 -20.69,2.45,Female,No,Sat,Dinner,4 -17.78,3.27,Male,No,Sat,Dinner,2 -24.06,3.6,Male,No,Sat,Dinner,3 -16.31,2.0,Male,No,Sat,Dinner,3 -16.93,3.07,Female,No,Sat,Dinner,3 -18.69,2.31,Male,No,Sat,Dinner,3 -31.27,5.0,Male,No,Sat,Dinner,3 -16.04,2.24,Male,No,Sat,Dinner,3 -17.46,2.54,Male,No,Sun,Dinner,2 -13.94,3.06,Male,No,Sun,Dinner,2 -9.68,1.32,Male,No,Sun,Dinner,2 -30.4,5.6,Male,No,Sun,Dinner,4 -18.29,3.0,Male,No,Sun,Dinner,2 -22.23,5.0,Male,No,Sun,Dinner,2 -32.4,6.0,Male,No,Sun,Dinner,4 -28.55,2.05,Male,No,Sun,Dinner,3 -18.04,3.0,Male,No,Sun,Dinner,2 -12.54,2.5,Male,No,Sun,Dinner,2 -10.29,2.6,Female,No,Sun,Dinner,2 -34.81,5.2,Female,No,Sun,Dinner,4 
-9.94,1.56,Male,No,Sun,Dinner,2 -25.56,4.34,Male,No,Sun,Dinner,4 -19.49,3.51,Male,No,Sun,Dinner,2 -38.01,3.0,Male,Yes,Sat,Dinner,4 -26.41,1.5,Female,No,Sat,Dinner,2 -11.24,1.76,Male,Yes,Sat,Dinner,2 -48.27,6.73,Male,No,Sat,Dinner,4 -20.29,3.21,Male,Yes,Sat,Dinner,2 -13.81,2.0,Male,Yes,Sat,Dinner,2 -11.02,1.98,Male,Yes,Sat,Dinner,2 -18.29,3.76,Male,Yes,Sat,Dinner,4 -17.59,2.64,Male,No,Sat,Dinner,3 -20.08,3.15,Male,No,Sat,Dinner,3 -16.45,2.47,Female,No,Sat,Dinner,2 -3.07,1.0,Female,Yes,Sat,Dinner,1 -20.23,2.01,Male,No,Sat,Dinner,2 -15.01,2.09,Male,Yes,Sat,Dinner,2 -12.02,1.97,Male,No,Sat,Dinner,2 -17.07,3.0,Female,No,Sat,Dinner,3 -26.86,3.14,Female,Yes,Sat,Dinner,2 -25.28,5.0,Female,Yes,Sat,Dinner,2 -14.73,2.2,Female,No,Sat,Dinner,2 -10.51,1.25,Male,No,Sat,Dinner,2 -17.92,3.08,Male,Yes,Sat,Dinner,2 -27.2,4.0,Male,No,Thur,Lunch,4 -22.76,3.0,Male,No,Thur,Lunch,2 -17.29,2.71,Male,No,Thur,Lunch,2 -19.44,3.0,Male,Yes,Thur,Lunch,2 -16.66,3.4,Male,No,Thur,Lunch,2 -10.07,1.83,Female,No,Thur,Lunch,1 -32.68,5.0,Male,Yes,Thur,Lunch,2 -15.98,2.03,Male,No,Thur,Lunch,2 -34.83,5.17,Female,No,Thur,Lunch,4 -13.03,2.0,Male,No,Thur,Lunch,2 -18.28,4.0,Male,No,Thur,Lunch,2 -24.71,5.85,Male,No,Thur,Lunch,2 -21.16,3.0,Male,No,Thur,Lunch,2 -28.97,3.0,Male,Yes,Fri,Dinner,2 -22.49,3.5,Male,No,Fri,Dinner,2 -5.75,1.0,Female,Yes,Fri,Dinner,2 -16.32,4.3,Female,Yes,Fri,Dinner,2 -22.75,3.25,Female,No,Fri,Dinner,2 -40.17,4.73,Male,Yes,Fri,Dinner,4 -27.28,4.0,Male,Yes,Fri,Dinner,2 -12.03,1.5,Male,Yes,Fri,Dinner,2 -21.01,3.0,Male,Yes,Fri,Dinner,2 -12.46,1.5,Male,No,Fri,Dinner,2 -11.35,2.5,Female,Yes,Fri,Dinner,2 -15.38,3.0,Female,Yes,Fri,Dinner,2 -44.3,2.5,Female,Yes,Sat,Dinner,3 -22.42,3.48,Female,Yes,Sat,Dinner,2 -20.92,4.08,Female,No,Sat,Dinner,2 -15.36,1.64,Male,Yes,Sat,Dinner,2 -20.49,4.06,Male,Yes,Sat,Dinner,2 -25.21,4.29,Male,Yes,Sat,Dinner,2 -18.24,3.76,Male,No,Sat,Dinner,2 -14.31,4.0,Female,Yes,Sat,Dinner,2 -14.0,3.0,Male,No,Sat,Dinner,2 -7.25,1.0,Female,No,Sat,Dinner,1 
-38.07,4.0,Male,No,Sun,Dinner,3 -23.95,2.55,Male,No,Sun,Dinner,2 -25.71,4.0,Female,No,Sun,Dinner,3 -17.31,3.5,Female,No,Sun,Dinner,2 -29.93,5.07,Male,No,Sun,Dinner,4 -10.65,1.5,Female,No,Thur,Lunch,2 -12.43,1.8,Female,No,Thur,Lunch,2 -24.08,2.92,Female,No,Thur,Lunch,4 -11.69,2.31,Male,No,Thur,Lunch,2 -13.42,1.68,Female,No,Thur,Lunch,2 -14.26,2.5,Male,No,Thur,Lunch,2 -15.95,2.0,Male,No,Thur,Lunch,2 -12.48,2.52,Female,No,Thur,Lunch,2 -29.8,4.2,Female,No,Thur,Lunch,6 -8.52,1.48,Male,No,Thur,Lunch,2 -14.52,2.0,Female,No,Thur,Lunch,2 -11.38,2.0,Female,No,Thur,Lunch,2 -22.82,2.18,Male,No,Thur,Lunch,3 -19.08,1.5,Male,No,Thur,Lunch,2 -20.27,2.83,Female,No,Thur,Lunch,2 -11.17,1.5,Female,No,Thur,Lunch,2 -12.26,2.0,Female,No,Thur,Lunch,2 -18.26,3.25,Female,No,Thur,Lunch,2 -8.51,1.25,Female,No,Thur,Lunch,2 -10.33,2.0,Female,No,Thur,Lunch,2 -14.15,2.0,Female,No,Thur,Lunch,2 -16.0,2.0,Male,Yes,Thur,Lunch,2 -13.16,2.75,Female,No,Thur,Lunch,2 -17.47,3.5,Female,No,Thur,Lunch,2 -34.3,6.7,Male,No,Thur,Lunch,6 -41.19,5.0,Male,No,Thur,Lunch,5 -27.05,5.0,Female,No,Thur,Lunch,6 -16.43,2.3,Female,No,Thur,Lunch,2 -8.35,1.5,Female,No,Thur,Lunch,2 -18.64,1.36,Female,No,Thur,Lunch,3 -11.87,1.63,Female,No,Thur,Lunch,2 -9.78,1.73,Male,No,Thur,Lunch,2 -7.51,2.0,Male,No,Thur,Lunch,2 -14.07,2.5,Male,No,Sun,Dinner,2 -13.13,2.0,Male,No,Sun,Dinner,2 -17.26,2.74,Male,No,Sun,Dinner,3 -24.55,2.0,Male,No,Sun,Dinner,4 -19.77,2.0,Male,No,Sun,Dinner,4 -29.85,5.14,Female,No,Sun,Dinner,5 -48.17,5.0,Male,No,Sun,Dinner,6 -25.0,3.75,Female,No,Sun,Dinner,4 -13.39,2.61,Female,No,Sun,Dinner,2 -16.49,2.0,Male,No,Sun,Dinner,4 -21.5,3.5,Male,No,Sun,Dinner,4 -12.66,2.5,Male,No,Sun,Dinner,2 -16.21,2.0,Female,No,Sun,Dinner,3 -13.81,2.0,Male,No,Sun,Dinner,2 -17.51,3.0,Female,Yes,Sun,Dinner,2 -24.52,3.48,Male,No,Sun,Dinner,3 -20.76,2.24,Male,No,Sun,Dinner,2 -31.71,4.5,Male,No,Sun,Dinner,4 -10.59,1.61,Female,Yes,Sat,Dinner,2 -10.63,2.0,Female,Yes,Sat,Dinner,2 -50.81,10.0,Male,Yes,Sat,Dinner,3 
-15.81,3.16,Male,Yes,Sat,Dinner,2 -7.25,5.15,Male,Yes,Sun,Dinner,2 -31.85,3.18,Male,Yes,Sun,Dinner,2 -16.82,4.0,Male,Yes,Sun,Dinner,2 -32.9,3.11,Male,Yes,Sun,Dinner,2 -17.89,2.0,Male,Yes,Sun,Dinner,2 -14.48,2.0,Male,Yes,Sun,Dinner,2 -9.6,4.0,Female,Yes,Sun,Dinner,2 -34.63,3.55,Male,Yes,Sun,Dinner,2 -34.65,3.68,Male,Yes,Sun,Dinner,4 -23.33,5.65,Male,Yes,Sun,Dinner,2 -45.35,3.5,Male,Yes,Sun,Dinner,3 -23.17,6.5,Male,Yes,Sun,Dinner,4 -40.55,3.0,Male,Yes,Sun,Dinner,2 -20.69,5.0,Male,No,Sun,Dinner,5 -20.9,3.5,Female,Yes,Sun,Dinner,3 -30.46,2.0,Male,Yes,Sun,Dinner,5 -18.15,3.5,Female,Yes,Sun,Dinner,3 -23.1,4.0,Male,Yes,Sun,Dinner,3 -15.69,1.5,Male,Yes,Sun,Dinner,2 -19.81,4.19,Female,Yes,Thur,Lunch,2 -28.44,2.56,Male,Yes,Thur,Lunch,2 -15.48,2.02,Male,Yes,Thur,Lunch,2 -16.58,4.0,Male,Yes,Thur,Lunch,2 -7.56,1.44,Male,No,Thur,Lunch,2 -10.34,2.0,Male,Yes,Thur,Lunch,2 -43.11,5.0,Female,Yes,Thur,Lunch,4 -13.0,2.0,Female,Yes,Thur,Lunch,2 -13.51,2.0,Male,Yes,Thur,Lunch,2 -18.71,4.0,Male,Yes,Thur,Lunch,3 -12.74,2.01,Female,Yes,Thur,Lunch,2 -13.0,2.0,Female,Yes,Thur,Lunch,2 -16.4,2.5,Female,Yes,Thur,Lunch,2 -20.53,4.0,Male,Yes,Thur,Lunch,4 -16.47,3.23,Female,Yes,Thur,Lunch,3 -26.59,3.41,Male,Yes,Sat,Dinner,3 -38.73,3.0,Male,Yes,Sat,Dinner,4 -24.27,2.03,Male,Yes,Sat,Dinner,2 -12.76,2.23,Female,Yes,Sat,Dinner,2 -30.06,2.0,Male,Yes,Sat,Dinner,3 -25.89,5.16,Male,Yes,Sat,Dinner,4 -48.33,9.0,Male,No,Sat,Dinner,4 -13.27,2.5,Female,Yes,Sat,Dinner,2 -28.17,6.5,Female,Yes,Sat,Dinner,3 -12.9,1.1,Female,Yes,Sat,Dinner,2 -28.15,3.0,Male,Yes,Sat,Dinner,5 -11.59,1.5,Male,Yes,Sat,Dinner,2 -7.74,1.44,Male,Yes,Sat,Dinner,2 -30.14,3.09,Female,Yes,Sat,Dinner,4 -12.16,2.2,Male,Yes,Fri,Lunch,2 -13.42,3.48,Female,Yes,Fri,Lunch,2 -8.58,1.92,Male,Yes,Fri,Lunch,1 -15.98,3.0,Female,No,Fri,Lunch,3 -13.42,1.58,Male,Yes,Fri,Lunch,2 -16.27,2.5,Female,Yes,Fri,Lunch,2 -10.09,2.0,Female,Yes,Fri,Lunch,2 -20.45,3.0,Male,No,Sat,Dinner,4 -13.28,2.72,Male,No,Sat,Dinner,2 -22.12,2.88,Female,Yes,Sat,Dinner,2 
-24.01,2.0,Male,Yes,Sat,Dinner,4 -15.69,3.0,Male,Yes,Sat,Dinner,3 -11.61,3.39,Male,No,Sat,Dinner,2 -10.77,1.47,Male,No,Sat,Dinner,2 -15.53,3.0,Male,Yes,Sat,Dinner,2 -10.07,1.25,Male,No,Sat,Dinner,2 -12.6,1.0,Male,Yes,Sat,Dinner,2 -32.83,1.17,Male,Yes,Sat,Dinner,2 -35.83,4.67,Female,No,Sat,Dinner,3 -29.03,5.92,Male,No,Sat,Dinner,3 -27.18,2.0,Female,Yes,Sat,Dinner,2 -22.67,2.0,Male,Yes,Sat,Dinner,2 -17.82,1.75,Male,No,Sat,Dinner,2 -18.78,3.0,Female,No,Thur,Dinner,2 diff --git a/pandas/tests/io/parser/data/iris.csv b/pandas/tests/io/parser/data/iris.csv deleted file mode 100644 index c19b9c3688515..0000000000000 --- a/pandas/tests/io/parser/data/iris.csv +++ /dev/null @@ -1,151 +0,0 @@ -SepalLength,SepalWidth,PetalLength,PetalWidth,Name -5.1,3.5,1.4,0.2,Iris-setosa -4.9,3.0,1.4,0.2,Iris-setosa -4.7,3.2,1.3,0.2,Iris-setosa -4.6,3.1,1.5,0.2,Iris-setosa -5.0,3.6,1.4,0.2,Iris-setosa -5.4,3.9,1.7,0.4,Iris-setosa -4.6,3.4,1.4,0.3,Iris-setosa -5.0,3.4,1.5,0.2,Iris-setosa -4.4,2.9,1.4,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -5.4,3.7,1.5,0.2,Iris-setosa -4.8,3.4,1.6,0.2,Iris-setosa -4.8,3.0,1.4,0.1,Iris-setosa -4.3,3.0,1.1,0.1,Iris-setosa -5.8,4.0,1.2,0.2,Iris-setosa -5.7,4.4,1.5,0.4,Iris-setosa -5.4,3.9,1.3,0.4,Iris-setosa -5.1,3.5,1.4,0.3,Iris-setosa -5.7,3.8,1.7,0.3,Iris-setosa -5.1,3.8,1.5,0.3,Iris-setosa -5.4,3.4,1.7,0.2,Iris-setosa -5.1,3.7,1.5,0.4,Iris-setosa -4.6,3.6,1.0,0.2,Iris-setosa -5.1,3.3,1.7,0.5,Iris-setosa -4.8,3.4,1.9,0.2,Iris-setosa -5.0,3.0,1.6,0.2,Iris-setosa -5.0,3.4,1.6,0.4,Iris-setosa -5.2,3.5,1.5,0.2,Iris-setosa -5.2,3.4,1.4,0.2,Iris-setosa -4.7,3.2,1.6,0.2,Iris-setosa -4.8,3.1,1.6,0.2,Iris-setosa -5.4,3.4,1.5,0.4,Iris-setosa -5.2,4.1,1.5,0.1,Iris-setosa -5.5,4.2,1.4,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -5.0,3.2,1.2,0.2,Iris-setosa -5.5,3.5,1.3,0.2,Iris-setosa -4.9,3.1,1.5,0.1,Iris-setosa -4.4,3.0,1.3,0.2,Iris-setosa -5.1,3.4,1.5,0.2,Iris-setosa -5.0,3.5,1.3,0.3,Iris-setosa -4.5,2.3,1.3,0.3,Iris-setosa -4.4,3.2,1.3,0.2,Iris-setosa 
-5.0,3.5,1.6,0.6,Iris-setosa -5.1,3.8,1.9,0.4,Iris-setosa -4.8,3.0,1.4,0.3,Iris-setosa -5.1,3.8,1.6,0.2,Iris-setosa -4.6,3.2,1.4,0.2,Iris-setosa -5.3,3.7,1.5,0.2,Iris-setosa -5.0,3.3,1.4,0.2,Iris-setosa -7.0,3.2,4.7,1.4,Iris-versicolor -6.4,3.2,4.5,1.5,Iris-versicolor -6.9,3.1,4.9,1.5,Iris-versicolor -5.5,2.3,4.0,1.3,Iris-versicolor -6.5,2.8,4.6,1.5,Iris-versicolor -5.7,2.8,4.5,1.3,Iris-versicolor -6.3,3.3,4.7,1.6,Iris-versicolor -4.9,2.4,3.3,1.0,Iris-versicolor -6.6,2.9,4.6,1.3,Iris-versicolor -5.2,2.7,3.9,1.4,Iris-versicolor -5.0,2.0,3.5,1.0,Iris-versicolor -5.9,3.0,4.2,1.5,Iris-versicolor -6.0,2.2,4.0,1.0,Iris-versicolor -6.1,2.9,4.7,1.4,Iris-versicolor -5.6,2.9,3.6,1.3,Iris-versicolor -6.7,3.1,4.4,1.4,Iris-versicolor -5.6,3.0,4.5,1.5,Iris-versicolor -5.8,2.7,4.1,1.0,Iris-versicolor -6.2,2.2,4.5,1.5,Iris-versicolor -5.6,2.5,3.9,1.1,Iris-versicolor -5.9,3.2,4.8,1.8,Iris-versicolor -6.1,2.8,4.0,1.3,Iris-versicolor -6.3,2.5,4.9,1.5,Iris-versicolor -6.1,2.8,4.7,1.2,Iris-versicolor -6.4,2.9,4.3,1.3,Iris-versicolor -6.6,3.0,4.4,1.4,Iris-versicolor -6.8,2.8,4.8,1.4,Iris-versicolor -6.7,3.0,5.0,1.7,Iris-versicolor -6.0,2.9,4.5,1.5,Iris-versicolor -5.7,2.6,3.5,1.0,Iris-versicolor -5.5,2.4,3.8,1.1,Iris-versicolor -5.5,2.4,3.7,1.0,Iris-versicolor -5.8,2.7,3.9,1.2,Iris-versicolor -6.0,2.7,5.1,1.6,Iris-versicolor -5.4,3.0,4.5,1.5,Iris-versicolor -6.0,3.4,4.5,1.6,Iris-versicolor -6.7,3.1,4.7,1.5,Iris-versicolor -6.3,2.3,4.4,1.3,Iris-versicolor -5.6,3.0,4.1,1.3,Iris-versicolor -5.5,2.5,4.0,1.3,Iris-versicolor -5.5,2.6,4.4,1.2,Iris-versicolor -6.1,3.0,4.6,1.4,Iris-versicolor -5.8,2.6,4.0,1.2,Iris-versicolor -5.0,2.3,3.3,1.0,Iris-versicolor -5.6,2.7,4.2,1.3,Iris-versicolor -5.7,3.0,4.2,1.2,Iris-versicolor -5.7,2.9,4.2,1.3,Iris-versicolor -6.2,2.9,4.3,1.3,Iris-versicolor -5.1,2.5,3.0,1.1,Iris-versicolor -5.7,2.8,4.1,1.3,Iris-versicolor -6.3,3.3,6.0,2.5,Iris-virginica -5.8,2.7,5.1,1.9,Iris-virginica -7.1,3.0,5.9,2.1,Iris-virginica -6.3,2.9,5.6,1.8,Iris-virginica 
-6.5,3.0,5.8,2.2,Iris-virginica -7.6,3.0,6.6,2.1,Iris-virginica -4.9,2.5,4.5,1.7,Iris-virginica -7.3,2.9,6.3,1.8,Iris-virginica -6.7,2.5,5.8,1.8,Iris-virginica -7.2,3.6,6.1,2.5,Iris-virginica -6.5,3.2,5.1,2.0,Iris-virginica -6.4,2.7,5.3,1.9,Iris-virginica -6.8,3.0,5.5,2.1,Iris-virginica -5.7,2.5,5.0,2.0,Iris-virginica -5.8,2.8,5.1,2.4,Iris-virginica -6.4,3.2,5.3,2.3,Iris-virginica -6.5,3.0,5.5,1.8,Iris-virginica -7.7,3.8,6.7,2.2,Iris-virginica -7.7,2.6,6.9,2.3,Iris-virginica -6.0,2.2,5.0,1.5,Iris-virginica -6.9,3.2,5.7,2.3,Iris-virginica -5.6,2.8,4.9,2.0,Iris-virginica -7.7,2.8,6.7,2.0,Iris-virginica -6.3,2.7,4.9,1.8,Iris-virginica -6.7,3.3,5.7,2.1,Iris-virginica -7.2,3.2,6.0,1.8,Iris-virginica -6.2,2.8,4.8,1.8,Iris-virginica -6.1,3.0,4.9,1.8,Iris-virginica -6.4,2.8,5.6,2.1,Iris-virginica -7.2,3.0,5.8,1.6,Iris-virginica -7.4,2.8,6.1,1.9,Iris-virginica -7.9,3.8,6.4,2.0,Iris-virginica -6.4,2.8,5.6,2.2,Iris-virginica -6.3,2.8,5.1,1.5,Iris-virginica -6.1,2.6,5.6,1.4,Iris-virginica -7.7,3.0,6.1,2.3,Iris-virginica -6.3,3.4,5.6,2.4,Iris-virginica -6.4,3.1,5.5,1.8,Iris-virginica -6.0,3.0,4.8,1.8,Iris-virginica -6.9,3.1,5.4,2.1,Iris-virginica -6.7,3.1,5.6,2.4,Iris-virginica -6.9,3.1,5.1,2.3,Iris-virginica -5.8,2.7,5.1,1.9,Iris-virginica -6.8,3.2,5.9,2.3,Iris-virginica -6.7,3.3,5.7,2.5,Iris-virginica -6.7,3.0,5.2,2.3,Iris-virginica -6.3,2.5,5.0,1.9,Iris-virginica -6.5,3.0,5.2,2.0,Iris-virginica -6.2,3.4,5.4,2.3,Iris-virginica -5.9,3.0,5.1,1.8,Iris-virginica \ No newline at end of file diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index a126f83164ce5..aa9294b016a3f 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -202,8 +202,8 @@ def test_read_expands_user_home_dir( @pytest.mark.parametrize( "reader, module, path", [ - (pd.read_csv, "os", ("data", "iris.csv")), - (pd.read_table, "os", ("data", "iris.csv")), + (pd.read_csv, "os", ("io", "data", "csv", "iris.csv")), + (pd.read_table, "os", ("io", "data", "csv", 
"iris.csv")), ( pd.read_fwf, "os", diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 45b3e839a08d1..d0569cd1d0cdf 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -273,7 +273,7 @@ def _get_exec(self): else: return self.conn.cursor() - @pytest.fixture(params=[("data", "iris.csv")]) + @pytest.fixture(params=[("io", "data", "csv", "iris.csv")]) def load_iris_data(self, datapath, request): import io diff --git a/pandas/tests/util/test_util.py b/pandas/tests/util/test_util.py index 6a19adef728e4..c9dbcf4798a96 100644 --- a/pandas/tests/util/test_util.py +++ b/pandas/tests/util/test_util.py @@ -58,7 +58,7 @@ def test_datapath_missing(datapath): def test_datapath(datapath): - args = ("data", "iris.csv") + args = ("io", "data", "csv", "iris.csv") result = datapath(*args) expected = os.path.join(os.path.dirname(os.path.dirname(__file__)), *args)
xref #34458
https://api.github.com/repos/pandas-dev/pandas/pulls/34664
2020-06-09T11:50:34Z
2020-06-09T12:41:51Z
2020-06-09T12:41:51Z
2020-06-10T10:49:18Z
BUG: Allow plain bools in ExtensionArray.equals
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 79f0039a9df65..7f2c61ff7d955 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -738,7 +738,7 @@ def equals(self, other: "ExtensionArray") -> bool: # boolean array with NA -> fill with False equal_values = equal_values.fillna(False) equal_na = self.isna() & other.isna() - return (equal_values | equal_na).all().item() + return bool((equal_values | equal_na).all()) def _values_for_factorize(self) -> Tuple[np.ndarray, Any]: """ diff --git a/pandas/tests/extension/arrow/arrays.py b/pandas/tests/extension/arrow/arrays.py index ffebc9f8b3359..29cfe1e0fe606 100644 --- a/pandas/tests/extension/arrow/arrays.py +++ b/pandas/tests/extension/arrow/arrays.py @@ -8,6 +8,7 @@ """ import copy import itertools +import operator from typing import Type import numpy as np @@ -106,6 +107,27 @@ def astype(self, dtype, copy=True): def dtype(self): return self._dtype + def _boolean_op(self, other, op): + if not isinstance(other, type(self)): + raise NotImplementedError() + + result = op(np.array(self._data), np.array(other._data)) + return ArrowBoolArray( + pa.chunked_array([pa.array(result, mask=pd.isna(self._data.to_pandas()))]) + ) + + def __eq__(self, other): + if not isinstance(other, type(self)): + return False + + return self._boolean_op(other, operator.eq) + + def __and__(self, other): + return self._boolean_op(other, operator.and_) + + def __or__(self, other): + return self._boolean_op(other, operator.or_) + @property def nbytes(self): return sum( @@ -153,10 +175,12 @@ def _reduce(self, method, skipna=True, **kwargs): return op(**kwargs) def any(self, axis=0, out=None): - return self._data.to_pandas().any() + # Explicitly return a plain bool to reproduce GH-34660 + return bool(self._data.to_pandas().any()) def all(self, axis=0, out=None): - return self._data.to_pandas().all() + # Explicitly return a plain bool to reproduce GH-34660 + return bool(self._data.to_pandas().all()) class 
ArrowBoolArray(ArrowExtensionArray): diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py index 48f1c34764313..7841360e568ed 100644 --- a/pandas/tests/extension/arrow/test_bool.py +++ b/pandas/tests/extension/arrow/test_bool.py @@ -29,6 +29,11 @@ def data_missing(): return ArrowBoolArray.from_scalars([None, True]) +def test_basic_equals(data): + # https://github.com/pandas-dev/pandas/issues/34660 + assert pd.Series(data).equals(pd.Series(data)) + + class BaseArrowTests: pass
- [x] closes #34660 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34661
2020-06-09T09:28:51Z
2020-06-09T15:37:45Z
2020-06-09T15:37:45Z
2020-06-09T16:50:12Z
REF: avoid get_freq_code
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 55148041c1718..a5891a9e1c22c 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -79,6 +79,7 @@ from pandas._libs.tslibs.offsets cimport ( is_tick_object, is_offset_object, ) +from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG from pandas._libs.tslibs.tzconversion cimport tz_convert_utc_to_tzlocal @@ -1677,7 +1678,7 @@ cdef class _Period: freq = self._maybe_convert_freq(freq) how = validate_end_alias(how) base1 = self._dtype.dtype_code - base2, _ = get_freq_code(freq) + base2 = freq_to_dtype_code(freq) # self.n can't be negative or 0 end = how == 'E' @@ -1767,10 +1768,11 @@ cdef class _Period: if freq is None: base = self._dtype.dtype_code freq = get_to_timestamp_base(base) + base = freq else: freq = self._maybe_convert_freq(freq) + base = freq._period_dtype_code - base, _ = get_freq_code(freq) val = self.asfreq(freq, how) dt64 = period_ordinal_to_dt64(val.ordinal, base) @@ -2414,8 +2416,7 @@ class Period(_Period): elif is_period_object(value): other = value - if freq is None or get_freq_code( - freq) == get_freq_code(other.freq): + if freq is None or freq._period_dtype_code == other.freq._period_dtype_code: ordinal = other.ordinal freq = other.freq else: @@ -2442,6 +2443,7 @@ class Period(_Period): except KeyError: raise ValueError(f"Invalid frequency or could not " f"infer: {reso}") + freq = to_offset(freq) elif PyDateTime_Check(value): dt = value @@ -2460,7 +2462,7 @@ class Period(_Period): raise ValueError(msg) if ordinal is None: - base, _ = get_freq_code(freq) + base = freq_to_dtype_code(freq) ordinal = period_ordinal(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond, 0, base) @@ -2472,9 +2474,17 @@ cdef bint is_period_object(object obj): return isinstance(obj, _Period) +cpdef int freq_to_dtype_code(BaseOffset freq) except? 
-1: + try: + return freq._period_dtype_code + except AttributeError as err: + raise ValueError(INVALID_FREQ_ERR_MSG) from err + + cdef int64_t _ordinal_from_fields(int year, int month, quarter, int day, - int hour, int minute, int second, freq): - base, mult = get_freq_code(freq) + int hour, int minute, int second, + BaseOffset freq): + base = freq_to_dtype_code(freq) if quarter is not None: year, month = quarter_to_myear(year, quarter, freq) diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index b16a3df003512..0d866aa7eae26 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -442,10 +442,11 @@ def to_timestamp(self, freq=None, how="start"): if freq is None: base = self.freq._period_dtype_code freq = libfrequencies.get_to_timestamp_base(base) + base = freq else: freq = Period._maybe_convert_freq(freq) + base = freq._period_dtype_code - base, _ = libfrequencies.get_freq_code(freq) new_data = self.asfreq(freq, how=how) new_data = libperiod.periodarr_to_dt64arr(new_data.asi8, base) @@ -962,7 +963,8 @@ def _get_ordinal_range(start, end, periods, freq, mult=1): ) if freq is not None: - _, mult = libfrequencies.get_freq_code(freq) + freq = to_offset(freq) + mult = freq.n if start is not None: start = Period(start, freq) @@ -1024,10 +1026,11 @@ def _range_from_fields( if quarter is not None: if freq is None: - freq = "Q" + freq = to_offset("Q") base = libfrequencies.FreqGroup.FR_QTR else: - base, mult = libfrequencies.get_freq_code(freq) + freq = to_offset(freq) + base = libperiod.freq_to_dtype_code(freq) if base != libfrequencies.FreqGroup.FR_QTR: raise AssertionError("base must equal FR_QTR") @@ -1037,7 +1040,8 @@ def _range_from_fields( val = libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base) ordinals.append(val) else: - base, mult = libfrequencies.get_freq_code(freq) + freq = to_offset(freq) + base = libperiod.freq_to_dtype_code(freq) arrays = _make_field_arrays(year, month, day, hour, minute, second) for y, mth, 
d, h, mn, s in zip(*arrays): ordinals.append(libperiod.period_ordinal(y, mth, d, h, mn, s, 0, 0, base))
We'll be able to rip it out entirely after this, #34658, and #34587.
https://api.github.com/repos/pandas-dev/pandas/pulls/34659
2020-06-08T22:42:17Z
2020-06-09T17:14:52Z
2020-06-09T17:14:52Z
2020-06-09T17:49:43Z
CLN: disallow passing tuples for Period freq
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index c6ba196a8a985..e5e0b2577d595 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -403,6 +403,7 @@ Backwards incompatible API changes - :func:`read_excel` no longer takes ``**kwds`` arguments. This means that passing in keyword ``chunksize`` now raises a ``TypeError`` (previously raised a ``NotImplementedError``), while passing in keyword ``encoding`` now raises a ``TypeError`` (:issue:`34464`) - :func: `merge` now checks ``suffixes`` parameter type to be ``tuple`` and raises ``TypeError``, whereas before a ``list`` or ``set`` were accepted and that the ``set`` could produce unexpected results (:issue:`33740`) +- :class:`Period` no longer accepts tuples for the ``freq`` argument (:issue:`34658`) ``MultiIndex.get_indexer`` interprets `method` argument differently ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index bbdcb63d18175..4f2219ca896f7 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1516,9 +1516,10 @@ cdef class _Period: ------- DateOffset """ - if isinstance(freq, (int, tuple)): - code, stride = get_freq_code(freq) - freq = get_freq_str(code, stride) + if isinstance(freq, int): + # We already have a dtype code + dtype = PeriodDtypeBase(freq) + freq = dtype.date_offset freq = to_offset(freq) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index e1f0221eaee65..d8654dee56319 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -501,7 +501,7 @@ def _parsed_string_to_bounds(self, reso: str, parsed: datetime): raise KeyError grp = get_freq_group(reso) - per = Period(parsed, freq=(grp, 1)) + per = Period(parsed, freq=grp) start, end = per.start_time, per.end_time # GH 24076 diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 
14922000c9707..0fafeef078d78 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -574,7 +574,7 @@ def _parsed_string_to_bounds(self, reso: str, parsed: datetime): raise KeyError(reso) grp = get_freq_group(reso) - iv = Period(parsed, freq=(grp, 1)) + iv = Period(parsed, freq=grp) return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end")) def _validate_partial_date_slice(self, reso: str):
Really looking forward to killing off `get_freq_code`
https://api.github.com/repos/pandas-dev/pandas/pulls/34658
2020-06-08T22:35:33Z
2020-06-09T01:47:00Z
2020-06-09T01:47:00Z
2020-06-09T01:57:10Z
#34640: CLN: remove 'private_key' and 'verbose' from gbq
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index ed48bf0675034..9e2a05b250d8f 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -325,6 +325,7 @@ I/O - :meth:`to_picke` and :meth:`read_pickle` did not support compression for file-objects (:issue:`26237`, :issue:`29054`, and :issue:`29570`) - Bug in :func:`LongTableBuilder.middle_separator` was duplicating LaTeX longtable entires in the List of Tables of a LaTeX document (:issue:`34360`) - Bug in :meth:`read_csv` with `engine='python'` truncating data if multiple items present in first row and first element started with BOM (:issue:`36343`) +- Removed ``private_key`` and ``verbose`` from :func:`read_gbq` as they are no longer supported in `pandas-gbq` (:issue:`34654` :issue:`30200`) Plotting ^^^^^^^^ @@ -372,6 +373,7 @@ ExtensionArray Other ^^^^^ + - Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` incorrectly raising ``AssertionError`` instead of ``ValueError`` when invalid parameter combinations are passed (:issue:`36045`) - Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` with numeric values and string ``to_replace`` (:issue:`34789`) - Fixed metadata propagation in the :class:`Series.dt` accessor (:issue:`28283`) diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index 3d0792357297f..afe1234f9fa96 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -31,8 +31,6 @@ def read_gbq( credentials=None, use_bqstorage_api: Optional[bool] = None, max_results: Optional[int] = None, - private_key=None, - verbose=None, progress_bar_type: Optional[str] = None, ) -> "DataFrame": """ @@ -208,8 +206,6 @@ def to_gbq( location: Optional[str] = None, progress_bar: bool = True, credentials=None, - verbose=None, - private_key=None, ) -> None: pandas_gbq = _try_import() pandas_gbq.to_gbq( @@ -224,6 +220,4 @@ def to_gbq( location=location, progress_bar=progress_bar, credentials=credentials, - verbose=verbose, - private_key=private_key, )
- [x ] closes #34640 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Hello, I am new to contributing to open source, so thank you in advance for any corrections and suggestions. Please let me know if I need to do anything else for this ticket! I removed the code containing both 'private_key' and 'verbose' from the gbq.py file.
https://api.github.com/repos/pandas-dev/pandas/pulls/34654
2020-06-08T19:39:16Z
2020-09-24T01:34:44Z
2020-09-24T01:34:43Z
2020-09-24T01:34:48Z
DOC: updated _testing.py for PR08 errors
diff --git a/pandas/_testing.py b/pandas/_testing.py index 0180169973e0c..61eab6b8152e1 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -1047,7 +1047,7 @@ def assert_extension_array_equal( check_exact : bool, default False Whether to compare number exactly. index_values : numpy.ndarray, default None - optional index (shared by both left and right), used in output. + Optional index (shared by both left and right), used in output. Notes -----
https://api.github.com/repos/pandas-dev/pandas/pulls/34653
2020-06-08T18:21:23Z
2020-06-08T22:56:50Z
2020-06-08T22:56:50Z
2020-06-08T22:57:02Z
DOC: updated plotting/_misc.py for PR08 errors
diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py index 3056977ec78ad..22a2d7617fded 100644 --- a/pandas/plotting/_misc.py +++ b/pandas/plotting/_misc.py @@ -160,7 +160,7 @@ def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds): Parameters ---------- frame : `DataFrame` - pandas object holding the data. + Object holding the data. class_column : str Column name containing the name of the data point category. ax : :class:`matplotlib.axes.Axes`, optional @@ -294,7 +294,7 @@ def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds): Parameters ---------- series : pandas.Series - pandas Series from where to get the samplings for the bootstrapping. + Series from where to get the samplings for the bootstrapping. fig : matplotlib.figure.Figure, default None If given, it will use the `fig` reference for plotting instead of creating a new one with default parameters.
https://api.github.com/repos/pandas-dev/pandas/pulls/34652
2020-06-08T17:34:57Z
2020-06-09T17:35:41Z
2020-06-09T17:35:41Z
2020-06-09T17:36:00Z
REF: simplify libperiod.get_yq
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 32acbcfb39b50..477bf6b325365 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -497,12 +497,11 @@ cdef int64_t asfreq_DTtoA(int64_t ordinal, asfreq_info *af_info) nogil: return <int64_t>(dts.year - 1970) -cdef int DtoQ_yq(int64_t ordinal, asfreq_info *af_info, int *year) nogil: +cdef int DtoQ_yq(int64_t ordinal, asfreq_info *af_info, npy_datetimestruct* dts) nogil: cdef: - npy_datetimestruct dts int quarter - pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts) + pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, dts) if af_info.to_end != 12: dts.month -= af_info.to_end if dts.month <= 0: @@ -510,19 +509,19 @@ cdef int DtoQ_yq(int64_t ordinal, asfreq_info *af_info, int *year) nogil: else: dts.year += 1 - year[0] = dts.year quarter = month_to_quarter(dts.month) return quarter cdef int64_t asfreq_DTtoQ(int64_t ordinal, asfreq_info *af_info) nogil: cdef: - int year, quarter + int quarter + npy_datetimestruct dts ordinal = downsample_daytime(ordinal, af_info) - quarter = DtoQ_yq(ordinal, af_info, &year) - return <int64_t>((year - 1970) * 4 + quarter - 1) + quarter = DtoQ_yq(ordinal, af_info, &dts) + return <int64_t>((dts.year - 1970) * 4 + quarter - 1) cdef int64_t asfreq_DTtoM(int64_t ordinal, asfreq_info *af_info) nogil: @@ -919,7 +918,7 @@ cdef int64_t get_time_nanos(int freq, int64_t unix_date, int64_t ordinal) nogil: return sub * factor -cdef int get_yq(int64_t ordinal, int freq, int *quarter, int *year): +cdef int get_yq(int64_t ordinal, int freq, npy_datetimestruct* dts): """ Find the year and quarter of a Period with the given ordinal and frequency @@ -927,22 +926,22 @@ cdef int get_yq(int64_t ordinal, int freq, int *quarter, int *year): ---------- ordinal : int64_t freq : int - quarter : *int - year : *int + dts : *npy_datetimestruct Returns ------- - qtr_freq : int + quarter : int describes the implied quarterly frequency associated 
with `freq` Notes ----- - Sets quarter and year inplace + Sets dts.year in-place. """ cdef: asfreq_info af_info int qtr_freq int64_t unix_date + int quarter unix_date = get_unix_date(ordinal, freq) @@ -951,11 +950,10 @@ cdef int get_yq(int64_t ordinal, int freq, int *quarter, int *year): else: qtr_freq = FR_QTR - assert (qtr_freq % 1000) <= 12 get_asfreq_info(FR_DAY, qtr_freq, True, &af_info) - quarter[0] = DtoQ_yq(unix_date, &af_info, year) - return qtr_freq + quarter = DtoQ_yq(unix_date, &af_info, dts) + return quarter cdef inline int month_to_quarter(int month) nogil: @@ -1225,15 +1223,15 @@ cdef str _period_strftime(int64_t value, int freq, bytes fmt): for i in range(len(extra_fmts)): if found_pat[i]: - if get_yq(value, freq, &quarter, &year) < 0: - raise ValueError('Unable to get quarter and year') + + quarter = get_yq(value, freq, &dts) if i == 0: repl = str(quarter) elif i == 1: # %f, 2-digit year - repl = f"{(year % 100):02d}" + repl = f"{(dts.year % 100):02d}" elif i == 2: - repl = str(year) + repl = str(dts.year) elif i == 3: repl = f"{(value % 1_000):03d}" elif i == 4: @@ -1259,20 +1257,19 @@ cdef int pyear(int64_t ordinal, int freq): return dts.year -@cython.cdivision cdef int pqyear(int64_t ordinal, int freq): cdef: - int year = 0 - int quarter = 0 - get_yq(ordinal, freq, &quarter, &year) - return year + npy_datetimestruct dts + + get_yq(ordinal, freq, &dts) + return dts.year cdef int pquarter(int64_t ordinal, int freq): cdef: - int year = 0 - int quarter = 0 - get_yq(ordinal, freq, &quarter, &year) + int quarter + npy_datetimestruct dts + quarter = get_yq(ordinal, freq, &dts) return quarter
https://api.github.com/repos/pandas-dev/pandas/pulls/34649
2020-06-08T16:16:09Z
2020-06-08T22:55:10Z
2020-06-08T22:55:10Z
2020-06-08T23:10:47Z
REF: de-duplicate month/year rolling in libperiod
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index bbdcb63d18175..4e8da6504d1ea 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -329,56 +329,34 @@ cdef inline int64_t transform_via_day(int64_t ordinal, # -------------------------------------------------------------------- # Conversion _to_ Daily Freq -cdef void AtoD_ym(int64_t ordinal, int64_t *year, - int *month, asfreq_info *af_info) nogil: - year[0] = ordinal + 1970 - month[0] = 1 - - if af_info.from_end != 12: - month[0] += af_info.from_end - if month[0] > 12: - # This case is never reached, but is kept for symmetry - # with QtoD_ym - month[0] -= 12 - else: - year[0] -= 1 - - cdef int64_t asfreq_AtoDT(int64_t ordinal, asfreq_info *af_info) nogil: cdef: - int64_t unix_date, year - int month + int64_t unix_date + npy_datetimestruct dts ordinal += af_info.is_end - AtoD_ym(ordinal, &year, &month, af_info) - unix_date = unix_date_from_ymd(year, month, 1) + dts.year = ordinal + 1970 + dts.month = 1 + adjust_dts_for_month(&dts, af_info.from_end) + + unix_date = unix_date_from_ymd(dts.year, dts.month, 1) unix_date -= af_info.is_end return upsample_daytime(unix_date, af_info) -cdef void QtoD_ym(int64_t ordinal, int *year, - int *month, asfreq_info *af_info) nogil: - year[0] = ordinal // 4 + 1970 - month[0] = (ordinal % 4) * 3 + 1 - - if af_info.from_end != 12: - month[0] += af_info.from_end - if month[0] > 12: - month[0] -= 12 - else: - year[0] -= 1 - - cdef int64_t asfreq_QtoDT(int64_t ordinal, asfreq_info *af_info) nogil: cdef: int64_t unix_date - int year, month + npy_datetimestruct dts ordinal += af_info.is_end - QtoD_ym(ordinal, &year, &month, af_info) - unix_date = unix_date_from_ymd(year, month, 1) + dts.year = ordinal // 4 + 1970 + dts.month = (ordinal % 4) * 3 + 1 + adjust_dts_for_month(&dts, af_info.from_end) + + unix_date = unix_date_from_ymd(dts.year, dts.month, 1) unix_date -= af_info.is_end return upsample_daytime(unix_date, af_info) @@ 
-486,12 +464,7 @@ cdef int DtoQ_yq(int64_t ordinal, asfreq_info *af_info, npy_datetimestruct* dts) int quarter pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, dts) - if af_info.to_end != 12: - dts.month -= af_info.to_end - if dts.month <= 0: - dts.month += 12 - else: - dts.year += 1 + adjust_dts_for_qtr(dts, af_info.to_end) quarter = month_to_quarter(dts.month) return quarter @@ -712,6 +685,24 @@ cdef inline int get_freq_group_index(int freq) nogil: return freq // 1000 +cdef void adjust_dts_for_month(npy_datetimestruct* dts, int from_end) nogil: + if from_end != 12: + dts.month += from_end + if dts.month > 12: + dts.month -= 12 + else: + dts.year -= 1 + + +cdef void adjust_dts_for_qtr(npy_datetimestruct* dts, int to_end) nogil: + if to_end != 12: + dts.month -= to_end + if dts.month <= 0: + dts.month += 12 + else: + dts.year += 1 + + # Find the unix_date (days elapsed since datetime(1970, 1, 1) # for the given year/month/day. # Assumes GREGORIAN_CALENDAR */
https://api.github.com/repos/pandas-dev/pandas/pulls/34648
2020-06-08T16:14:05Z
2020-06-09T00:53:55Z
2020-06-09T00:53:55Z
2020-06-09T00:55:19Z
TST: groupby apply with indexing and column aggregation returns the column #7002
diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index bc8067212d60e..8468a21904bf8 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -961,3 +961,16 @@ def fn(x): name="col2", ) tm.assert_series_equal(result, expected) + + +def test_apply_function_with_indexing_return_column(): + # GH: 7002 + df = DataFrame( + { + "foo1": ["one", "two", "two", "three", "one", "two"], + "foo2": [1, 2, 4, 4, 5, 6], + } + ) + result = df.groupby("foo1", as_index=False).apply(lambda x: x.mean()) + expected = DataFrame({"foo1": ["one", "three", "two"], "foo2": [3.0, 4.0, 4.0]}) + tm.assert_frame_equal(result, expected)
Adds test for #7002 - [x] closes #7002 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/34647
2020-06-08T15:46:45Z
2020-06-14T22:20:51Z
2020-06-14T22:20:51Z
2020-06-15T03:11:17Z
DOC: updated io/sql.py for PR08 errors
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 991d222bfae1f..b137608475b3d 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -332,11 +332,9 @@ def read_sql_query( ---------- sql : str SQL query or SQLAlchemy Selectable (select or text object) SQL query to be executed. - con : SQLAlchemy connectable(engine/connection), database str URI, - or sqlite3 DBAPI2 connection + con : SQLAlchemy connectable, str, or sqlite3 connection Using SQLAlchemy makes it possible to use any DB supported by that - library. - If a DBAPI2 object, only sqlite3 is supported. + library. If a DBAPI2 object, only sqlite3 is supported. index_col : str or list of str, optional, default: None Column(s) to set as index(MultiIndex). coerce_float : bool, default True @@ -438,9 +436,7 @@ def read_sql( ---------- sql : str or SQLAlchemy Selectable (select or text object) SQL query to be executed or a table name. - con : SQLAlchemy connectable (engine/connection) or database str URI - or DBAPI2 connection (fallback mode). - + con : SQLAlchemy connectable, str, or sqlite3 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible for engine disposal and connection closure for the SQLAlchemy connectable. See
https://api.github.com/repos/pandas-dev/pandas/pulls/34646
2020-06-08T15:23:27Z
2020-06-08T22:58:12Z
2020-06-08T22:58:12Z
2020-06-08T22:58:15Z
ENH: Add max_results kwarg to read_gbq
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index b2b55b7b503ec..ccb5cb3ac574f 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -291,6 +291,7 @@ Other enhancements - :meth:`groupby.transform` now allows ``func`` to be ``pad``, ``backfill`` and ``cumcount`` (:issue:`31269`). - :meth:`~pandas.io.json.read_json` now accepts `nrows` parameter. (:issue:`33916`). - :meth `~pandas.io.gbq.read_gbq` now allows to disable progress bar (:issue:`33360`). +- :meth:`~pandas.io.gbq.read_gbq` now supports the ``max_results`` kwarg from ``pandas-gbq`` (:issue:`34639`). .. --------------------------------------------------------------------------- diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index 9b46f970afc66..3d0792357297f 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -30,6 +30,7 @@ def read_gbq( configuration: Optional[Dict[str, Any]] = None, credentials=None, use_bqstorage_api: Optional[bool] = None, + max_results: Optional[int] = None, private_key=None, verbose=None, progress_bar_type: Optional[str] = None, @@ -125,6 +126,13 @@ def read_gbq( ``fastavro`` packages. .. versionadded:: 0.25.0 + max_results : int, optional + If set, limit the maximum number of rows to fetch from the query + results. + + *New in version 0.12.0 of pandas-gbq*. + + .. versionadded:: 1.1.0 progress_bar_type : Optional, str If set, use the `tqdm <https://tqdm.github.io/>`__ library to display a progress bar while the data downloads. Install the @@ -162,11 +170,13 @@ def read_gbq( """ pandas_gbq = _try_import() - kwargs: Dict[str, Union[str, bool, None]] = {} + kwargs: Dict[str, Union[str, bool, int, None]] = {} # START: new kwargs. Don't populate unless explicitly set. 
if use_bqstorage_api is not None: kwargs["use_bqstorage_api"] = use_bqstorage_api + if max_results is not None: + kwargs["max_results"] = max_results kwargs["progress_bar_type"] = progress_bar_type # END: new kwargs diff --git a/pandas/tests/io/test_gbq.py b/pandas/tests/io/test_gbq.py index e9cefe3056130..df107259d38cd 100644 --- a/pandas/tests/io/test_gbq.py +++ b/pandas/tests/io/test_gbq.py @@ -113,9 +113,10 @@ def mock_read_gbq(sql, **kwargs): return DataFrame([[1.0]]) monkeypatch.setattr("pandas_gbq.read_gbq", mock_read_gbq) - pd.read_gbq("SELECT 1", use_bqstorage_api=True) + pd.read_gbq("SELECT 1", use_bqstorage_api=True, max_results=1) assert captured_kwargs["use_bqstorage_api"] + assert captured_kwargs["max_results"] def test_read_gbq_without_new_kwargs(monkeypatch): @@ -129,6 +130,7 @@ def mock_read_gbq(sql, **kwargs): pd.read_gbq("SELECT 1") assert "use_bqstorage_api" not in captured_kwargs + assert "max_results" not in captured_kwargs @pytest.mark.parametrize("progress_bar", [None, "foo"])
Adds support for the new `max_results` kwarg from pandas-gbq (added in [0.12.0](https://github.com/pydata/pandas-gbq/releases/tag/0.12.0)). Since `max_results` is a new kwarg, it is handled and tested in the same way as the `use_bqstorage_api` kwarg to maintain backwards compatibility. ❓ **Open question:** Setting `max_results=0` causes `pandas_gbq.read_gbq` to return `None` instead of a DataFrame. I've kept this behaviour the same in this PR, but maintainers may prefer to always return a (empty) DataFrame instead of `None`. - [x] closes #34639 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34641
2020-06-08T11:53:34Z
2020-06-09T22:17:20Z
2020-06-09T22:17:20Z
2020-06-09T22:17:24Z
CLN: deduplicate in core.internals.blocks.interpolate
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index a4a8d672895ce..e2a778f729470 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1084,14 +1084,9 @@ def interpolate( inplace = validate_bool_kwarg(inplace, "inplace") - def check_int_bool(self, inplace): - # Only FloatBlocks will contain NaNs. - # timedelta subclasses IntBlock - if (self.is_bool or self.is_integer) and not self.is_timedelta: - if inplace: - return self - else: - return self.copy() + # Only FloatBlocks will contain NaNs. timedelta subclasses IntBlock + if (self.is_bool or self.is_integer) and not self.is_timedelta: + return self if inplace else self.copy() # a fill na type method try: @@ -1100,9 +1095,6 @@ def check_int_bool(self, inplace): m = None if m is not None: - r = check_int_bool(self, inplace) - if r is not None: - return r return self._interpolate_with_fill( method=m, axis=axis, @@ -1115,10 +1107,6 @@ def check_int_bool(self, inplace): # validate the interp method m = missing.clean_interp_method(method, **kwargs) - r = check_int_bool(self, inplace) - if r is not None: - return r - assert index is not None # for mypy return self._interpolate(
broken off #34628
https://api.github.com/repos/pandas-dev/pandas/pulls/34638
2020-06-08T08:52:32Z
2020-06-09T12:36:21Z
2020-06-09T12:36:21Z
2020-06-09T12:44:41Z
CLN: EWMA cython code and function dispatch
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx index afa0539014041..9e088062d7280 100644 --- a/pandas/_libs/window/aggregations.pyx +++ b/pandas/_libs/window/aggregations.pyx @@ -1793,19 +1793,19 @@ def ewma(float64_t[:] vals, float64_t com, int adjust, bint ignore_na, int minp) new_wt = 1. if adjust else alpha weighted_avg = vals[0] - is_observation = (weighted_avg == weighted_avg) + is_observation = weighted_avg == weighted_avg nobs = int(is_observation) - output[0] = weighted_avg if (nobs >= minp) else NaN + output[0] = weighted_avg if nobs >= minp else NaN old_wt = 1. with nogil: for i in range(1, N): cur = vals[i] - is_observation = (cur == cur) + is_observation = cur == cur nobs += is_observation if weighted_avg == weighted_avg: - if is_observation or (not ignore_na): + if is_observation or not ignore_na: old_wt *= old_wt_factor if is_observation: @@ -1821,7 +1821,7 @@ def ewma(float64_t[:] vals, float64_t com, int adjust, bint ignore_na, int minp) elif is_observation: weighted_avg = cur - output[i] = weighted_avg if (nobs >= minp) else NaN + output[i] = weighted_avg if nobs >= minp else NaN return output @@ -1851,7 +1851,7 @@ def ewmcov(float64_t[:] input_x, float64_t[:] input_y, """ cdef: - Py_ssize_t N = len(input_x) + Py_ssize_t N = len(input_x), M = len(input_y) float64_t alpha, old_wt_factor, new_wt, mean_x, mean_y, cov float64_t sum_wt, sum_wt2, old_wt, cur_x, cur_y, old_mean_x, old_mean_y float64_t numerator, denominator @@ -1859,8 +1859,8 @@ def ewmcov(float64_t[:] input_x, float64_t[:] input_y, ndarray[float64_t] output bint is_observation - if <Py_ssize_t>len(input_y) != N: - raise ValueError(f"arrays are of different lengths ({N} and {len(input_y)})") + if M != N: + raise ValueError(f"arrays are of different lengths ({N} and {M})") output = np.empty(N, dtype=float) if N == 0: @@ -1874,12 +1874,12 @@ def ewmcov(float64_t[:] input_x, float64_t[:] input_y, mean_x = input_x[0] mean_y = input_y[0] - 
is_observation = ((mean_x == mean_x) and (mean_y == mean_y)) + is_observation = (mean_x == mean_x) and (mean_y == mean_y) nobs = int(is_observation) if not is_observation: mean_x = NaN mean_y = NaN - output[0] = (0. if bias else NaN) if (nobs >= minp) else NaN + output[0] = (0. if bias else NaN) if nobs >= minp else NaN cov = 0. sum_wt = 1. sum_wt2 = 1. @@ -1890,10 +1890,10 @@ def ewmcov(float64_t[:] input_x, float64_t[:] input_y, for i in range(1, N): cur_x = input_x[i] cur_y = input_y[i] - is_observation = ((cur_x == cur_x) and (cur_y == cur_y)) + is_observation = (cur_x == cur_x) and (cur_y == cur_y) nobs += is_observation if mean_x == mean_x: - if is_observation or (not ignore_na): + if is_observation or not ignore_na: sum_wt *= old_wt_factor sum_wt2 *= (old_wt_factor * old_wt_factor) old_wt *= old_wt_factor @@ -1929,8 +1929,8 @@ def ewmcov(float64_t[:] input_x, float64_t[:] input_y, if not bias: numerator = sum_wt * sum_wt denominator = numerator - sum_wt2 - if (denominator > 0.): - output[i] = ((numerator / denominator) * cov) + if denominator > 0: + output[i] = (numerator / denominator) * cov else: output[i] = NaN else: diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index d5f2b67eeac2e..a5e30c900cae2 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -1,3 +1,4 @@ +from functools import partial from textwrap import dedent import numpy as np @@ -219,7 +220,7 @@ def aggregate(self, func, *args, **kwargs): agg = aggregate - def _apply(self, func, **kwargs): + def _apply(self, func): """ Rolling statistical measure using supplied function. Designed to be used with passed-in Cython array-based functions. 
@@ -253,23 +254,6 @@ def _apply(self, func, **kwargs): results.append(values.copy()) continue - # if we have a string function name, wrap it - if isinstance(func, str): - cfunc = getattr(window_aggregations, func, None) - if cfunc is None: - raise ValueError( - f"we do not support this function in window_aggregations.{func}" - ) - - def func(arg): - return cfunc( - arg, - self.com, - int(self.adjust), - int(self.ignore_na), - int(self.min_periods), - ) - results.append(np.apply_along_axis(func, self.axis, values)) return self._wrap_results(results, block_list, obj, exclude) @@ -286,7 +270,15 @@ def mean(self, *args, **kwargs): Arguments and keyword arguments to be passed into func. """ nv.validate_window_func("mean", args, kwargs) - return self._apply("ewma", **kwargs) + window_func = self._get_roll_func("ewma") + window_func = partial( + window_func, + com=self.com, + adjust=int(self.adjust), + ignore_na=self.ignore_na, + minp=int(self.min_periods), + ) + return self._apply(window_func) @Substitution(name="ewm", func_name="std") @Appender(_doc_template) @@ -320,7 +312,7 @@ def f(arg): int(bias), ) - return self._apply(f, **kwargs) + return self._apply(f) @Substitution(name="ewm", func_name="cov") @Appender(_doc_template)
- [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/34636
2020-06-08T07:02:17Z
2020-06-08T14:25:32Z
2020-06-08T14:25:31Z
2020-06-08T15:28:52Z
BUG/API: Disallow unit if input to Timedelta and to_timedelta is/contains str
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 2243790a663df..416291916890a 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -826,6 +826,7 @@ Timedelta - Bug in :func:`timedelta_range` that produced an extra point on a edge case (:issue:`30353`, :issue:`33498`) - Bug in :meth:`DataFrame.resample` that produced an extra point on a edge case (:issue:`30353`, :issue:`13022`, :issue:`33498`) - Bug in :meth:`DataFrame.resample` that ignored the ``loffset`` argument when dealing with timedelta (:issue:`7687`, :issue:`33498`) +- Bug in :class:`Timedelta` and `pandas.to_timedelta` that ignored `unit`-argument for string input (:issue:`12136`) Timezones ^^^^^^^^^ diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index a239804ea7bc2..a5b502f3f4071 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -218,7 +218,7 @@ cdef convert_to_timedelta64(object ts, object unit): @cython.boundscheck(False) @cython.wraparound(False) -def array_to_timedelta64(object[:] values, unit='ns', errors='raise'): +def array_to_timedelta64(object[:] values, unit=None, errors='raise'): """ Convert an ndarray to an array of timedeltas. If errors == 'coerce', coerce non-convertible objects to NaT. Otherwise, raise. @@ -235,6 +235,13 @@ def array_to_timedelta64(object[:] values, unit='ns', errors='raise'): result = np.empty(n, dtype='m8[ns]') iresult = result.view('i8') + if unit is not None: + for i in range(n): + if isinstance(values[i], str): + raise ValueError( + "unit must not be specified if the input contains a str" + ) + # Usually, we have all strings. If so, we hit the fast path. # If this path fails, we try conversion a different way, and # this is where all of the error handling will take place. 
@@ -247,10 +254,10 @@ def array_to_timedelta64(object[:] values, unit='ns', errors='raise'): else: result[i] = parse_timedelta_string(values[i]) except (TypeError, ValueError): - unit = parse_timedelta_unit(unit) + parsed_unit = parse_timedelta_unit(unit or 'ns') for i in range(n): try: - result[i] = convert_to_timedelta64(values[i], unit) + result[i] = convert_to_timedelta64(values[i], parsed_unit) except ValueError: if errors == 'coerce': result[i] = NPY_NAT @@ -1155,6 +1162,8 @@ class Timedelta(_Timedelta): elif isinstance(value, _Timedelta): value = value.value elif isinstance(value, str): + if unit is not None: + raise ValueError("unit must not be specified if the value is a str") if len(value) > 0 and value[0] == 'P': value = parse_iso_format_string(value) else: diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index f439f07790274..d0657994dd81c 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -876,7 +876,7 @@ def f(x): # Constructor Helpers -def sequence_to_td64ns(data, copy=False, unit="ns", errors="raise"): +def sequence_to_td64ns(data, copy=False, unit=None, errors="raise"): """ Parameters ---------- @@ -884,6 +884,7 @@ def sequence_to_td64ns(data, copy=False, unit="ns", errors="raise"): copy : bool, default False unit : str, default "ns" The timedelta unit to treat integers as multiples of. + Must be un-specifed if the data contains a str. errors : {"raise", "coerce", "ignore"}, default "raise" How to handle elements that cannot be converted to timedelta64[ns]. See ``pandas.to_timedelta`` for details. @@ -906,7 +907,8 @@ def sequence_to_td64ns(data, copy=False, unit="ns", errors="raise"): higher level. 
""" inferred_freq = None - unit = parse_timedelta_unit(unit) + if unit is not None: + unit = parse_timedelta_unit(unit) # Unwrap whatever we have into a np.ndarray if not hasattr(data, "dtype"): @@ -936,7 +938,7 @@ def sequence_to_td64ns(data, copy=False, unit="ns", errors="raise"): # cast the unit, multiply base/frac separately # to avoid precision issues from float -> int mask = np.isnan(data) - m, p = precision_from_unit(unit) + m, p = precision_from_unit(unit or "ns") base = data.astype(np.int64) frac = data - base if p: @@ -1002,7 +1004,7 @@ def ints_to_td64ns(data, unit="ns"): return data, copy_made -def objects_to_td64ns(data, unit="ns", errors="raise"): +def objects_to_td64ns(data, unit=None, errors="raise"): """ Convert a object-dtyped or string-dtyped array into an timedelta64[ns]-dtyped array. @@ -1012,6 +1014,7 @@ def objects_to_td64ns(data, unit="ns", errors="raise"): data : ndarray or Index unit : str, default "ns" The timedelta unit to treat integers as multiples of. + Must not be specified if the data contains a str. errors : {"raise", "coerce", "ignore"}, default "raise" How to handle elements that cannot be converted to timedelta64[ns]. See ``pandas.to_timedelta`` for details. 
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 15d9987310f18..001eb1789007f 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -200,7 +200,10 @@ def stringify(value): v = v.tz_convert("UTC") return TermValue(v, v.value, kind) elif kind == "timedelta64" or kind == "timedelta": - v = Timedelta(v, unit="s").value + if isinstance(v, str): + v = Timedelta(v).value + else: + v = Timedelta(v, unit="s").value return TermValue(int(v), v, kind) elif meta == "category": metadata = extract_array(self.metadata, extract_numpy=True) diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index 51b404b46f321..87eac93a6072c 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -13,7 +13,7 @@ from pandas.core.arrays.timedeltas import sequence_to_td64ns -def to_timedelta(arg, unit="ns", errors="raise"): +def to_timedelta(arg, unit=None, errors="raise"): """ Convert argument to timedelta. @@ -27,6 +27,7 @@ def to_timedelta(arg, unit="ns", errors="raise"): arg : str, timedelta, list-like or Series The data to be converted to timedelta. unit : str, default 'ns' + Must not be specified if the arg is/contains a str. Denotes the unit of the arg. 
Possible values: ('W', 'D', 'days', 'day', 'hours', hour', 'hr', 'h', 'm', 'minute', 'min', 'minutes', 'T', 'S', 'seconds', @@ -76,7 +77,8 @@ def to_timedelta(arg, unit="ns", errors="raise"): TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq=None) """ - unit = parse_timedelta_unit(unit) + if unit is not None: + unit = parse_timedelta_unit(unit) if errors not in ("ignore", "raise", "coerce"): raise ValueError("errors must be one of 'ignore', 'raise', or 'coerce'}") @@ -104,6 +106,9 @@ def to_timedelta(arg, unit="ns", errors="raise"): "arg must be a string, timedelta, list, tuple, 1-d array, or Series" ) + if isinstance(arg, str) and unit is not None: + raise ValueError("unit must not be specified if the input is/contains a str") + # ...so it must be a scalar value. Return scalar. return _coerce_scalar_to_timedelta_type(arg, unit=unit, errors=errors) @@ -124,7 +129,7 @@ def _coerce_scalar_to_timedelta_type(r, unit="ns", errors="raise"): return result -def _convert_listlike(arg, unit="ns", errors="raise", name=None): +def _convert_listlike(arg, unit=None, errors="raise", name=None): """Convert a list of objects to a timedelta index object.""" if isinstance(arg, (list, tuple)) or not hasattr(arg, "dtype"): # This is needed only to ensure that in the case where we end up diff --git a/pandas/tests/scalar/timedelta/test_constructors.py b/pandas/tests/scalar/timedelta/test_constructors.py index c58994d738562..23fb25b838da6 100644 --- a/pandas/tests/scalar/timedelta/test_constructors.py +++ b/pandas/tests/scalar/timedelta/test_constructors.py @@ -289,3 +289,17 @@ def test_timedelta_constructor_identity(): expected = Timedelta(np.timedelta64(1, "s")) result = Timedelta(expected) assert result is expected + + +@pytest.mark.parametrize( + "constructor, value, unit, expectation", + [ + (Timedelta, "10s", "ms", (ValueError, "unit must not be specified")), + (to_timedelta, "10s", "ms", (ValueError, "unit must not be specified")), + 
(to_timedelta, ["1", 2, 3], "s", (ValueError, "unit must not be specified")), + ], +) +def test_string_with_unit(constructor, value, unit, expectation): + exp, match = expectation + with pytest.raises(exp, match=match): + _ = constructor(value, unit=unit)
- [x] closes #12136 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34634
2020-06-07T17:28:50Z
2020-06-08T22:10:04Z
2020-06-08T22:10:04Z
2020-06-08T22:10:15Z
Revert backport of #33632: Parquet & s3 I/O changes
diff --git a/pandas/io/common.py b/pandas/io/common.py index eaf4bcf203796..9617965915aa5 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -141,33 +141,6 @@ def urlopen(*args, **kwargs): return urllib.request.urlopen(*args, **kwargs) -def get_fs_for_path(filepath: str): - """ - Get appropriate filesystem given a filepath. - Supports s3fs, gcs and local file system. - - Parameters - ---------- - filepath : str - File path. e.g s3://bucket/object, /local/path, gcs://pandas/obj - - Returns - ------- - s3fs.S3FileSystem, gcsfs.GCSFileSystem, None - Appropriate FileSystem to use. None for local filesystem. - """ - if is_s3_url(filepath): - from pandas.io import s3 - - return s3.get_fs() - elif is_gcs_url(filepath): - from pandas.io import gcs - - return gcs.get_fs() - else: - return None - - def get_filepath_or_buffer( filepath_or_buffer: FilePathOrBuffer, encoding: Optional[str] = None, diff --git a/pandas/io/gcs.py b/pandas/io/gcs.py index d2d8fc2d2139f..1f5e0faedc6d2 100644 --- a/pandas/io/gcs.py +++ b/pandas/io/gcs.py @@ -6,10 +6,6 @@ ) -def get_fs(): - return gcsfs.GCSFileSystem() - - def get_filepath_or_buffer( filepath_or_buffer, encoding=None, compression=None, mode=None ): @@ -17,6 +13,6 @@ def get_filepath_or_buffer( if mode is None: mode = "rb" - fs = get_fs() + fs = gcsfs.GCSFileSystem() filepath_or_buffer = fs.open(filepath_or_buffer, mode) return filepath_or_buffer, None, compression, True diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 9c94c913e35cd..ff6e186947ebe 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -8,12 +8,7 @@ from pandas import DataFrame, get_option -from pandas.io.common import ( - get_filepath_or_buffer, - get_fs_for_path, - is_gcs_url, - is_s3_url, -) +from pandas.io.common import get_filepath_or_buffer, is_gcs_url, is_s3_url def get_engine(engine: str) -> "BaseImpl": @@ -97,15 +92,13 @@ def write( **kwargs, ): self.validate_dataframe(df) - file_obj_or_path, _, _, should_close = 
get_filepath_or_buffer(path, mode="wb") + path, _, _, should_close = get_filepath_or_buffer(path, mode="wb") from_pandas_kwargs: Dict[str, Any] = {"schema": kwargs.pop("schema", None)} if index is not None: from_pandas_kwargs["preserve_index"] = index table = self.api.Table.from_pandas(df, **from_pandas_kwargs) - # write_to_dataset does not support a file-like object when - # a dircetory path is used, so just pass the path string. if partition_cols is not None: self.api.parquet.write_to_dataset( table, @@ -118,20 +111,24 @@ def write( else: self.api.parquet.write_table( table, - file_obj_or_path, + path, compression=compression, coerce_timestamps=coerce_timestamps, **kwargs, ) if should_close: - file_obj_or_path.close() + path.close() def read(self, path, columns=None, **kwargs): - parquet_ds = self.api.parquet.ParquetDataset( - path, filesystem=get_fs_for_path(path), **kwargs - ) - kwargs["columns"] = columns - result = parquet_ds.read_pandas(**kwargs).to_pandas() + path, _, _, should_close = get_filepath_or_buffer(path) + + kwargs["use_pandas_metadata"] = True + result = self.api.parquet.read_table( + path, columns=columns, **kwargs + ).to_pandas() + if should_close: + path.close() + return result @@ -286,7 +283,7 @@ def read_parquet(path, engine: str = "auto", columns=None, **kwargs): A file URL can also be a path to a directory that contains multiple partitioned parquet files. Both pyarrow and fastparquet support paths to directories as well as file URLs. A directory path could be: - ``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir`` + ``file://localhost/path/to/tables`` If you want to pass in a path object, pandas accepts any ``os.PathLike``. 
diff --git a/pandas/io/s3.py b/pandas/io/s3.py index 329c861d2386a..976c319f89d47 100644 --- a/pandas/io/s3.py +++ b/pandas/io/s3.py @@ -16,10 +16,6 @@ def _strip_schema(url): return result.netloc + result.path -def get_fs(): - return s3fs.S3FileSystem(anon=False) - - def get_file_and_filesystem( filepath_or_buffer: FilePathOrBuffer, mode: Optional[str] = None ) -> Tuple[IO, Any]: @@ -28,7 +24,7 @@ def get_file_and_filesystem( if mode is None: mode = "rb" - fs = get_fs() + fs = s3fs.S3FileSystem(anon=False) try: file = fs.open(_strip_schema(filepath_or_buffer), mode) except (FileNotFoundError, NoCredentialsError): @@ -38,7 +34,7 @@ def get_file_and_filesystem( # aren't valid for that bucket. # A NoCredentialsError is raised if you don't have creds # for that bucket. - fs = get_fs() + fs = s3fs.S3FileSystem(anon=True) file = fs.open(_strip_schema(filepath_or_buffer), mode) return file, fs diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index d1bdf1209a737..70a05b93c9cc3 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -1,6 +1,7 @@ """ test parquet compat """ import datetime from distutils.version import LooseVersion +import locale import os from warnings import catch_warnings @@ -129,7 +130,6 @@ def check_round_trip( read_kwargs=None, expected=None, check_names=True, - check_like=False, repeat=2, ): """Verify parquet serializer and deserializer produce the same results. @@ -149,8 +149,6 @@ def check_round_trip( Expected deserialization result, otherwise will be equal to `df` check_names: list of str, optional Closed set of column names to be compared - check_like: bool, optional - If True, ignore the order of index & columns. 
repeat: int, optional How many times to repeat the test """ @@ -171,9 +169,7 @@ def compare(repeat): with catch_warnings(record=True): actual = read_parquet(path, **read_kwargs) - tm.assert_frame_equal( - expected, actual, check_names=check_names, check_like=check_like - ) + tm.assert_frame_equal(expected, actual, check_names=check_names) if path is None: with tm.ensure_clean() as path: @@ -489,37 +485,15 @@ def test_categorical(self, pa): expected = df.astype(object) check_round_trip(df, pa, expected=expected) + # GH#33077 2020-03-27 + @pytest.mark.xfail( + locale.getlocale()[0] in ["zh_CN", "it_IT"], + reason="dateutil cannot parse e.g. '五, 27 3月 2020 21:45:38 GMT'", + ) def test_s3_roundtrip(self, df_compat, s3_resource, pa): # GH #19134 check_round_trip(df_compat, pa, path="s3://pandas-test/pyarrow.parquet") - @td.skip_if_no("s3fs") - @pytest.mark.parametrize("partition_col", [["A"], []]) - def test_s3_roundtrip_for_dir(self, df_compat, s3_resource, pa, partition_col): - from pandas.io.s3 import get_fs as get_s3_fs - - # GH #26388 - # https://github.com/apache/arrow/blob/master/python/pyarrow/tests/test_parquet.py#L2716 - # As per pyarrow partitioned columns become 'categorical' dtypes - # and are added to back of dataframe on read - - expected_df = df_compat.copy() - if partition_col: - expected_df[partition_col] = expected_df[partition_col].astype("category") - check_round_trip( - df_compat, - pa, - expected=expected_df, - path="s3://pandas-test/parquet_dir", - write_kwargs={ - "partition_cols": partition_col, - "compression": None, - "filesystem": get_s3_fs(), - }, - check_like=True, - repeat=1, - ) - def test_partition_cols_supported(self, pa, df_full): # GH #23283 partition_cols = ["bool", "int"]
Ref: https://github.com/pandas-dev/pandas/issues/34626#issuecomment-640225867 Revert backport of https://github.com/pandas-dev/pandas/pull/33632 Backport: https://github.com/pandas-dev/pandas/pull/34173 cc @jorisvandenbossche @simonjayhawkins
https://api.github.com/repos/pandas-dev/pandas/pulls/34632
2020-06-07T15:43:26Z
2020-06-09T07:36:51Z
2020-06-09T07:36:51Z
2020-06-09T07:36:51Z
TYP: some type annotations for interpolate
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 4b074924baaf2..714a332be2196 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6863,16 +6863,16 @@ def replace( @Appender(_shared_docs["interpolate"] % _shared_doc_kwargs) def interpolate( - self, - method="linear", - axis=0, - limit=None, - inplace=False, - limit_direction="forward", - limit_area=None, - downcast=None, + self: FrameOrSeries, + method: str = "linear", + axis: Axis = 0, + limit: Optional[int] = None, + inplace: bool_t = False, + limit_direction: str = "forward", + limit_area: Optional[str] = None, + downcast: Optional[str] = None, **kwargs, - ): + ) -> Optional[FrameOrSeries]: """ Interpolate values according to different methods. """ diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index e70c8f9d5f09a..a4a8d672895ce 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1,7 +1,7 @@ from datetime import datetime, timedelta import inspect import re -from typing import Any, List +from typing import TYPE_CHECKING, Any, List, Optional import warnings import numpy as np @@ -83,6 +83,9 @@ import pandas.core.missing as missing from pandas.core.nanops import nanpercentile +if TYPE_CHECKING: + from pandas import Index + class Block(PandasObject): """ @@ -1066,16 +1069,16 @@ def coerce_to_target_dtype(self, other): def interpolate( self, - method="pad", - axis=0, - index=None, - inplace=False, - limit=None, - limit_direction="forward", - limit_area=None, - fill_value=None, - coerce=False, - downcast=None, + method: str = "pad", + axis: int = 0, + index: Optional["Index"] = None, + inplace: bool = False, + limit: Optional[int] = None, + limit_direction: str = "forward", + limit_area: Optional[str] = None, + fill_value: Optional[Any] = None, + coerce: bool = False, + downcast: Optional[str] = None, **kwargs, ): @@ -1115,6 +1118,9 @@ def check_int_bool(self, inplace): r = check_int_bool(self, inplace) if r is not 
None: return r + + assert index is not None # for mypy + return self._interpolate( method=m, index=index, @@ -1130,13 +1136,13 @@ def check_int_bool(self, inplace): def _interpolate_with_fill( self, - method="pad", - axis=0, - inplace=False, - limit=None, - fill_value=None, - coerce=False, - downcast=None, + method: str = "pad", + axis: int = 0, + inplace: bool = False, + limit: Optional[int] = None, + fill_value: Optional[Any] = None, + coerce: bool = False, + downcast: Optional[str] = None, ) -> List["Block"]: """ fillna but using the interpolate machinery """ inplace = validate_bool_kwarg(inplace, "inplace") @@ -1169,15 +1175,15 @@ def _interpolate_with_fill( def _interpolate( self, - method=None, - index=None, - fill_value=None, - axis=0, - limit=None, - limit_direction="forward", - limit_area=None, - inplace=False, - downcast=None, + method: str, + index: "Index", + fill_value: Optional[Any] = None, + axis: int = 0, + limit: Optional[int] = None, + limit_direction: str = "forward", + limit_area: Optional[str] = None, + inplace: bool = False, + downcast: Optional[str] = None, **kwargs, ) -> List["Block"]: """ interpolate using scipy wrappers """ @@ -1200,14 +1206,14 @@ def _interpolate( ) # process 1-d slices in the axis direction - def func(x): + def func(yvalues: np.ndarray) -> np.ndarray: # process a 1-d slice, returning it # should the axis argument be handled below in apply_along_axis? # i.e. not an arg to missing.interpolate_1d return missing.interpolate_1d( - index, - x, + xvalues=index, + yvalues=yvalues, method=method, limit=limit, limit_direction=limit_direction, diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 79bbef5fa5505..d8671616f944e 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -2,6 +2,8 @@ Routines for filling missing data. 
""" +from typing import Any, List, Optional, Set, Union + import numpy as np from pandas._libs import algos, lib @@ -92,7 +94,7 @@ def clean_fill_method(method, allow_nearest=False): return method -def clean_interp_method(method, **kwargs): +def clean_interp_method(method: str, **kwargs) -> str: order = kwargs.get("order") valid = [ "linear", @@ -160,15 +162,15 @@ def find_valid_index(values, how: str): def interpolate_1d( - xvalues, - yvalues, - method="linear", - limit=None, - limit_direction="forward", - limit_area=None, - fill_value=None, - bounds_error=False, - order=None, + xvalues: np.ndarray, + yvalues: np.ndarray, + method: Optional[str] = "linear", + limit: Optional[int] = None, + limit_direction: str = "forward", + limit_area: Optional[str] = None, + fill_value: Optional[Any] = None, + bounds_error: bool = False, + order: Optional[int] = None, **kwargs, ): """ @@ -238,6 +240,7 @@ def interpolate_1d( # are more than'limit' away from the prior non-NaN. # set preserve_nans based on direction using _interp_limit + preserve_nans: Union[List, Set] if limit_direction == "forward": preserve_nans = start_nans | set(_interp_limit(invalid, limit, 0)) elif limit_direction == "backward":
pre-cursor to #34628
https://api.github.com/repos/pandas-dev/pandas/pulls/34631
2020-06-07T15:08:39Z
2020-06-07T20:07:21Z
2020-06-07T20:07:21Z
2020-06-08T08:32:06Z
TYP: some type annotations in core\tools\datetimes.py
diff --git a/pandas/_typing.py b/pandas/_typing.py index 71df27119bd96..4892abc5f6f51 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -1,4 +1,4 @@ -from datetime import datetime, timedelta +from datetime import datetime, timedelta, tzinfo from pathlib import Path from typing import ( IO, @@ -52,6 +52,7 @@ TimedeltaConvertibleTypes = Union[ "Timedelta", timedelta, np.timedelta64, int, np.int64, float, str ] +Timezone = Union[str, tzinfo] # other diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index e1f0221eaee65..85625a8aea523 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -10,7 +10,7 @@ from pandas._libs.tslibs.frequencies import get_freq_group from pandas._libs.tslibs.offsets import prefix_mapping from pandas._typing import DtypeObj, Label -from pandas.util._decorators import cache_readonly +from pandas.util._decorators import cache_readonly, doc from pandas.core.dtypes.common import ( DT64NS_DTYPE, @@ -64,9 +64,13 @@ def _new_DatetimeIndex(cls, d): @inherit_names( - ["to_period", "to_perioddelta", "to_julian_date", "strftime", "isocalendar"] + ["to_perioddelta", "to_julian_date", "strftime", "isocalendar"] + DatetimeArray._field_ops - + DatetimeArray._datetimelike_methods, + + [ + method + for method in DatetimeArray._datetimelike_methods + if method not in ("tz_localize",) + ], DatetimeArray, wrap=True, ) @@ -218,6 +222,21 @@ class DatetimeIndex(DatetimeTimedeltaMixin): _data: DatetimeArray tz: Optional[tzinfo] + # -------------------------------------------------------------------- + # methods that dispatch to array and wrap result in DatetimeIndex + + @doc(DatetimeArray.tz_localize) + def tz_localize( + self, tz, ambiguous="raise", nonexistent="raise" + ) -> "DatetimeIndex": + arr = self._data.tz_localize(tz, ambiguous, nonexistent) + return type(self)._simple_new(arr, name=self.name) + + @doc(DatetimeArray.to_period) + def to_period(self, freq=None) -> "DatetimeIndex": + arr = 
self._data.to_period(freq) + return type(self)._simple_new(arr, name=self.name) + # -------------------------------------------------------------------- # Constructors diff --git a/pandas/core/series.py b/pandas/core/series.py index 71ffdcbd40fe7..b51c08fa592d5 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4846,7 +4846,7 @@ def to_period(self, freq=None, copy=True) -> "Series": if not isinstance(self.index, DatetimeIndex): raise TypeError(f"unsupported Type {type(self.index).__name__}") - new_index = self.index.to_period(freq=freq) # type: ignore + new_index = self.index.to_period(freq=freq) return self._constructor(new_values, index=new_index).__finalize__( self, method="to_period" ) diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 42bffa0374472..0adab143f6052 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -2,7 +2,16 @@ from datetime import datetime from functools import partial from itertools import islice -from typing import TYPE_CHECKING, Optional, TypeVar, Union +from typing import ( + TYPE_CHECKING, + Callable, + List, + Optional, + Tuple, + TypeVar, + Union, + overload, +) import warnings import numpy as np @@ -15,7 +24,7 @@ _guess_datetime_format, ) from pandas._libs.tslibs.strptime import array_strptime -from pandas._typing import ArrayLike +from pandas._typing import ArrayLike, Label, Timezone from pandas.core.dtypes.common import ( ensure_object, @@ -45,16 +54,15 @@ if TYPE_CHECKING: from pandas import Series # noqa:F401 + from pandas._libs.tslibs.nattype import NaTType # noqa:F401 # --------------------------------------------------------------------- # types used in annotations -ArrayConvertible = Union[list, tuple, ArrayLike, "Series"] +ArrayConvertible = Union[List, Tuple, ArrayLike, "Series"] Scalar = Union[int, float, str] DatetimeScalar = TypeVar("DatetimeScalar", Scalar, datetime) -DatetimeScalarOrArrayConvertible = Union[ - DatetimeScalar, list, tuple, 
ArrayLike, "Series" -] +DatetimeScalarOrArrayConvertible = Union[DatetimeScalar, ArrayConvertible] # --------------------------------------------------------------------- @@ -123,7 +131,12 @@ def should_cache( return do_caching -def _maybe_cache(arg, format, cache, convert_listlike): +def _maybe_cache( + arg: ArrayConvertible, + format: Optional[str], + cache: bool, + convert_listlike: Callable, +) -> "Series": """ Create a cache of unique dates from an array of dates @@ -159,7 +172,7 @@ def _maybe_cache(arg, format, cache, convert_listlike): def _box_as_indexlike( - dt_array: ArrayLike, utc: Optional[bool] = None, name: Optional[str] = None + dt_array: ArrayLike, utc: Optional[bool] = None, name: Label = None ) -> Index: """ Properly boxes the ndarray of datetimes to DatetimeIndex @@ -244,15 +257,15 @@ def _return_parsed_timezone_results(result, timezones, tz, name): def _convert_listlike_datetimes( arg, - format, - name=None, - tz=None, - unit=None, - errors=None, - infer_datetime_format=None, - dayfirst=None, - yearfirst=None, - exact=None, + format: Optional[str], + name: Label = None, + tz: Optional[Timezone] = None, + unit: Optional[str] = None, + errors: Optional[str] = None, + infer_datetime_format: Optional[bool] = None, + dayfirst: Optional[bool] = None, + yearfirst: Optional[bool] = None, + exact: Optional[bool] = None, ): """ Helper function for to_datetime. 
Performs the conversions of 1D listlike @@ -306,9 +319,7 @@ def _convert_listlike_datetimes( pass elif tz: # DatetimeArray, DatetimeIndex - # error: Item "DatetimeIndex" of "Union[DatetimeArray, DatetimeIndex]" has - # no attribute "tz_localize" - return arg.tz_localize(tz) # type: ignore + return arg.tz_localize(tz) return arg @@ -539,19 +550,70 @@ def _adjust_to_origin(arg, origin, unit): return arg +@overload def to_datetime( - arg, - errors="raise", - dayfirst=False, - yearfirst=False, - utc=None, - format=None, - exact=True, - unit=None, - infer_datetime_format=False, + arg: DatetimeScalar, + errors: str = ..., + dayfirst: bool = ..., + yearfirst: bool = ..., + utc: Optional[bool] = ..., + format: Optional[str] = ..., + exact: bool = ..., + unit: Optional[str] = ..., + infer_datetime_format: bool = ..., + origin=..., + cache: bool = ..., +) -> Union[DatetimeScalar, "NaTType"]: + ... + + +@overload +def to_datetime( + arg: "Series", + errors: str = ..., + dayfirst: bool = ..., + yearfirst: bool = ..., + utc: Optional[bool] = ..., + format: Optional[str] = ..., + exact: bool = ..., + unit: Optional[str] = ..., + infer_datetime_format: bool = ..., + origin=..., + cache: bool = ..., +) -> "Series": + ... + + +@overload +def to_datetime( + arg: Union[List, Tuple], + errors: str = ..., + dayfirst: bool = ..., + yearfirst: bool = ..., + utc: Optional[bool] = ..., + format: Optional[str] = ..., + exact: bool = ..., + unit: Optional[str] = ..., + infer_datetime_format: bool = ..., + origin=..., + cache: bool = ..., +) -> DatetimeIndex: + ... 
+ + +def to_datetime( + arg: DatetimeScalarOrArrayConvertible, + errors: str = "raise", + dayfirst: bool = False, + yearfirst: bool = False, + utc: Optional[bool] = None, + format: Optional[str] = None, + exact: bool = True, + unit: Optional[str] = None, + infer_datetime_format: bool = False, origin="unix", - cache=True, -): + cache: bool = True, +) -> Union[DatetimeIndex, "Series", DatetimeScalar, "NaTType"]: """ Convert argument to datetime. @@ -746,8 +808,7 @@ def to_datetime( if not cache_array.empty: result = _convert_and_box_cache(arg, cache_array, name=arg.name) else: - convert_listlike = partial(convert_listlike, name=arg.name) - result = convert_listlike(arg, format) + result = convert_listlike(arg, format, name=arg.name) elif is_list_like(arg): try: cache_array = _maybe_cache(arg, format, cache, convert_listlike) diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py index 739c77d1c0b99..be86b57ca2066 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -1,4 +1,4 @@ -from typing import List +from typing import List, cast from pandas._typing import FilePathOrBuffer, Scalar from pandas.compat._optional import import_optional_dependency @@ -179,7 +179,9 @@ def _get_cell_value(self, cell, convert_float: bool) -> Scalar: cell_value = cell.attributes.get((OFFICENS, "date-value")) return pd.to_datetime(cell_value) elif cell_type == "time": - return pd.to_datetime(str(cell)).time() + result = pd.to_datetime(str(cell)) + result = cast(pd.Timestamp, result) + return result.time() else: raise ValueError(f"Unrecognized type {cell_type}")
https://api.github.com/repos/pandas-dev/pandas/pulls/34630
2020-06-07T14:17:17Z
2020-06-10T09:23:17Z
2020-06-10T09:23:17Z
2020-06-10T09:25:02Z
TST #24444 added tests
diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index 42bd20fd9640b..3e769b577582a 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -649,6 +649,26 @@ def test_to_timestamp_business_end(self): expected = pd.Timestamp("1990-01-06") - pd.Timedelta(nanoseconds=1) assert result == expected + @pytest.mark.parametrize( + "ts, expected", + [ + ("1970-01-01 00:00:00", 0), + ("1970-01-01 00:00:00.000001", 1), + ("1970-01-01 00:00:00.00001", 10), + ("1970-01-01 00:00:00.499", 499000), + ("1999-12-31 23:59:59.999", 999000), + ("1999-12-31 23:59:59.999999", 999999), + ("2050-12-31 23:59:59.5", 500000), + ("2050-12-31 23:59:59.500001", 500001), + ("2050-12-31 23:59:59.123456", 123456), + ], + ) + @pytest.mark.parametrize("freq", [None, "us", "ns"]) + def test_to_timestamp_microsecond(self, ts, expected, freq): + # GH 24444 + result = Period(ts).to_timestamp(freq=freq).microsecond + assert result == expected + # -------------------------------------------------------------- # Rendering: __repr__, strftime, etc
At last, there are no issues at all. I've made a new PR. - [x] closes #24444 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/34627
2020-06-07T08:53:04Z
2020-06-07T17:19:57Z
2020-06-07T17:19:57Z
2020-06-07T18:53:01Z
REF: re-use existing conversion functions
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 5c890c7fbf59d..32acbcfb39b50 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -763,10 +763,9 @@ cdef int64_t get_period_ordinal(npy_datetimestruct *dts, int freq) nogil: period_ordinal : int64_t """ cdef: - int64_t unix_date, seconds, delta - int64_t weeks - int64_t day_adj + int64_t unix_date int freq_group, fmonth, mdiff + NPY_DATETIMEUNIT unit freq_group = get_freq_group(freq) @@ -789,44 +788,42 @@ cdef int64_t get_period_ordinal(npy_datetimestruct *dts, int freq) nogil: mdiff = dts.month - fmonth + 12 return (dts.year - 1970) * 4 + (mdiff - 1) // 3 - elif freq == FR_MTH: - return (dts.year - 1970) * 12 + dts.month - 1 - - unix_date = npy_datetimestruct_to_datetime(NPY_FR_D, dts) - - if freq >= FR_SEC: - seconds = unix_date * 86400 + dts.hour * 3600 + dts.min * 60 + dts.sec - - if freq == FR_MS: - return seconds * 1000 + dts.us // 1000 - - elif freq == FR_US: - return seconds * 1000000 + dts.us - - elif freq == FR_NS: - return (seconds * 1000000000 + - dts.us * 1000 + dts.ps // 1000) + elif freq_group == FR_WK: + unix_date = npy_datetimestruct_to_datetime(NPY_FR_D, dts) + return unix_date_to_week(unix_date, freq - FR_WK) - else: - return seconds + elif freq == FR_BUS: + unix_date = npy_datetimestruct_to_datetime(NPY_FR_D, dts) + return DtoB(dts, 0, unix_date) - elif freq == FR_MIN: - return unix_date * 1440 + dts.hour * 60 + dts.min + unit = get_unit(freq) + return npy_datetimestruct_to_datetime(unit, dts) - elif freq == FR_HR: - return unix_date * 24 + dts.hour +cdef NPY_DATETIMEUNIT get_unit(int freq) nogil: + """ + Convert the freq to the corresponding NPY_DATETIMEUNIT to pass + to npy_datetimestruct_to_datetime. 
+ """ + if freq == FR_MTH: + return NPY_DATETIMEUNIT.NPY_FR_M elif freq == FR_DAY: - return unix_date - + return NPY_DATETIMEUNIT.NPY_FR_D + elif freq == FR_HR: + return NPY_DATETIMEUNIT.NPY_FR_h + elif freq == FR_MIN: + return NPY_DATETIMEUNIT.NPY_FR_m + elif freq == FR_SEC: + return NPY_DATETIMEUNIT.NPY_FR_s + elif freq == FR_MS: + return NPY_DATETIMEUNIT.NPY_FR_ms + elif freq == FR_US: + return NPY_DATETIMEUNIT.NPY_FR_us + elif freq == FR_NS: + return NPY_DATETIMEUNIT.NPY_FR_ns elif freq == FR_UND: - return unix_date - - elif freq == FR_BUS: - return DtoB(dts, 0, unix_date) - - elif freq_group == FR_WK: - return unix_date_to_week(unix_date, freq - FR_WK) + # Default to Day + return NPY_DATETIMEUNIT.NPY_FR_D cdef void get_date_info(int64_t ordinal, int freq, npy_datetimestruct *dts) nogil:
https://api.github.com/repos/pandas-dev/pandas/pulls/34625
2020-06-06T22:38:16Z
2020-06-08T16:09:38Z
2020-06-08T16:09:38Z
2020-06-08T16:11:14Z
DOC: updated core/arrays/base.py for PR08 errors
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index b5e917bafca7e..79f0039a9df65 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -512,7 +512,7 @@ def argsort( kind : {'quicksort', 'mergesort', 'heapsort'}, optional Sorting algorithm. *args, **kwargs: - passed through to :func:`numpy.argsort`. + Passed through to :func:`numpy.argsort`. Returns -------
https://api.github.com/repos/pandas-dev/pandas/pulls/34624
2020-06-06T18:42:01Z
2020-06-06T22:23:52Z
2020-06-06T22:23:52Z
2020-06-06T22:23:59Z
TST/REF: arithmetic tests for BooleanArray + consolidate with integer masked tests
diff --git a/pandas/_testing.py b/pandas/_testing.py index 61eab6b8152e1..ebb53dd81682c 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -4,6 +4,7 @@ from datetime import datetime from functools import wraps import gzip +import operator import os from shutil import rmtree import string @@ -2758,3 +2759,28 @@ def get_cython_table_params(ndframe, func_names_and_expected): if name == func_name ] return results + + +def get_op_from_name(op_name: str) -> Callable: + """ + The operator function for a given op name. + + Parameters + ---------- + op_name : string + The op name, in form of "add" or "__add__". + + Returns + ------- + function + A function performing the operation. + """ + short_opname = op_name.strip("_") + try: + op = getattr(operator, short_opname) + except AttributeError: + # Assume it is the reverse operator + rop = getattr(operator, short_opname[1:]) + op = lambda x, y: rop(y, x) + + return op diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py index 5d791ffd20f01..9f1c2c6e668ad 100644 --- a/pandas/core/arrays/boolean.py +++ b/pandas/core/arrays/boolean.py @@ -717,11 +717,22 @@ def boolean_arithmetic_method(self, other): # nans propagate if mask is None: mask = self._mask + if other is libmissing.NA: + mask |= True else: mask = self._mask | mask - with np.errstate(all="ignore"): - result = op(self._data, other) + if other is libmissing.NA: + # if other is NA, the result will be all NA and we can't run the + # actual op, so we need to choose the resulting dtype manually + if op_name in {"floordiv", "rfloordiv", "mod", "rmod", "pow", "rpow"}: + dtype = "int8" + else: + dtype = "bool" + result = np.zeros(len(self._data), dtype=dtype) + else: + with np.errstate(all="ignore"): + result = op(self._data, other) # divmod returns a tuple if op_name == "divmod": diff --git a/pandas/tests/arrays/boolean/test_arithmetic.py b/pandas/tests/arrays/boolean/test_arithmetic.py index df4c218cbf9bf..1a4ab9799e8e5 100644 --- 
a/pandas/tests/arrays/boolean/test_arithmetic.py +++ b/pandas/tests/arrays/boolean/test_arithmetic.py @@ -1,8 +1,10 @@ +import operator + import numpy as np import pytest import pandas as pd -from pandas.tests.extension.base import BaseOpsUtil +import pandas._testing as tm @pytest.fixture @@ -13,30 +15,87 @@ def data(): ) -class TestArithmeticOps(BaseOpsUtil): - def test_error(self, data, all_arithmetic_operators): - # invalid ops +@pytest.fixture +def left_array(): + return pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") - op = all_arithmetic_operators - s = pd.Series(data) - ops = getattr(s, op) - opa = getattr(data, op) - # invalid scalars - with pytest.raises(TypeError): - ops("foo") - with pytest.raises(TypeError): - ops(pd.Timestamp("20180101")) +@pytest.fixture +def right_array(): + return pd.array([True, False, None] * 3, dtype="boolean") + - # invalid array-likes - if op not in ("__mul__", "__rmul__"): - # TODO(extension) numpy's mul with object array sees booleans as numbers - with pytest.raises(TypeError): - ops(pd.Series("foo", index=s.index)) +# Basic test for the arithmetic array ops +# ----------------------------------------------------------------------------- - # 2d - result = opa(pd.DataFrame({"A": s})) - assert result is NotImplemented - with pytest.raises(NotImplementedError): - opa(np.arange(len(s)).reshape(-1, len(s))) +@pytest.mark.parametrize( + "opname, exp", + [ + ("add", [True, True, None, True, False, None, None, None, None]), + ("mul", [True, False, None, False, False, None, None, None, None]), + ], + ids=["add", "mul"], +) +def test_add_mul(left_array, right_array, opname, exp): + op = getattr(operator, opname) + result = op(left_array, right_array) + expected = pd.array(exp, dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + +def test_sub(left_array, right_array): + with pytest.raises(TypeError): + # numpy points to ^ operator or logical_xor function instead + left_array - right_array + + +def 
test_div(left_array, right_array): + # for now division gives a float numpy array + result = left_array / right_array + expected = np.array( + [1.0, np.inf, np.nan, 0.0, np.nan, np.nan, np.nan, np.nan, np.nan], + dtype="float64", + ) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "opname", + [ + "floordiv", + "mod", + pytest.param( + "pow", marks=pytest.mark.xfail(reason="TODO follow int8 behaviour? GH34686") + ), + ], +) +def test_op_int8(left_array, right_array, opname): + op = getattr(operator, opname) + result = op(left_array, right_array) + expected = op(left_array.astype("Int8"), right_array.astype("Int8")) + tm.assert_extension_array_equal(result, expected) + + +# Test generic characteristics / errors +# ----------------------------------------------------------------------------- + + +def test_error_invalid_values(data, all_arithmetic_operators): + # invalid ops + + op = all_arithmetic_operators + s = pd.Series(data) + ops = getattr(s, op) + + # invalid scalars + with pytest.raises(TypeError): + ops("foo") + with pytest.raises(TypeError): + ops(pd.Timestamp("20180101")) + + # invalid array-likes + if op not in ("__mul__", "__rmul__"): + # TODO(extension) numpy's mul with object array sees booleans as numbers + with pytest.raises(TypeError): + ops(pd.Series("foo", index=s.index)) diff --git a/pandas/tests/arrays/integer/test_arithmetic.py b/pandas/tests/arrays/integer/test_arithmetic.py index a6c47f3192175..d309f6423e0c1 100644 --- a/pandas/tests/arrays/integer/test_arithmetic.py +++ b/pandas/tests/arrays/integer/test_arithmetic.py @@ -5,23 +5,9 @@ import pandas as pd import pandas._testing as tm -from pandas.core.arrays import ExtensionArray, integer_array +from pandas.core.arrays import integer_array import pandas.core.ops as ops - -# TODO need to use existing utility function or move this somewhere central -def get_op_from_name(op_name): - short_opname = op_name.strip("_") - try: - op = getattr(operator, short_opname) - 
except AttributeError: - # Assume it is the reverse operator - rop = getattr(operator, short_opname[1:]) - op = lambda x, y: rop(y, x) - - return op - - # Basic test for the arithmetic array ops # ----------------------------------------------------------------------------- @@ -151,55 +137,6 @@ def test_rpow_one_to_na(): tm.assert_numpy_array_equal(result, expected) -# Test equivalence of scalars, numpy arrays with array ops -# ----------------------------------------------------------------------------- - - -def test_array_scalar_like_equivalence(data, all_arithmetic_operators): - op = get_op_from_name(all_arithmetic_operators) - - scalar = 2 - scalar_array = pd.array([2] * len(data), dtype=data.dtype) - - # TODO also add len-1 array (np.array([2], dtype=data.dtype.numpy_dtype)) - for scalar in [2, data.dtype.type(2)]: - result = op(data, scalar) - expected = op(data, scalar_array) - if isinstance(expected, ExtensionArray): - tm.assert_extension_array_equal(result, expected) - else: - # TODO div still gives float ndarray -> remove this once we have Float EA - tm.assert_numpy_array_equal(result, expected) - - -def test_array_NA(data, all_arithmetic_operators): - if "truediv" in all_arithmetic_operators: - pytest.skip("division with pd.NA raises") - op = get_op_from_name(all_arithmetic_operators) - - scalar = pd.NA - scalar_array = pd.array([pd.NA] * len(data), dtype=data.dtype) - - result = op(data, scalar) - expected = op(data, scalar_array) - tm.assert_extension_array_equal(result, expected) - - -def test_numpy_array_equivalence(data, all_arithmetic_operators): - op = get_op_from_name(all_arithmetic_operators) - - numpy_array = np.array([2] * len(data), dtype=data.dtype.numpy_dtype) - pd_array = pd.array(numpy_array, dtype=data.dtype) - - result = op(data, numpy_array) - expected = op(data, pd_array) - if isinstance(expected, ExtensionArray): - tm.assert_extension_array_equal(result, expected) - else: - # TODO div still gives float ndarray -> remove this once we 
have Float EA - tm.assert_numpy_array_equal(result, expected) - - @pytest.mark.parametrize("other", [0, 0.5]) def test_numpy_zero_dim_ndarray(other): arr = integer_array([1, None, 2]) @@ -208,53 +145,7 @@ def test_numpy_zero_dim_ndarray(other): tm.assert_equal(result, expected) -# Test equivalence with Series and DataFrame ops -# ----------------------------------------------------------------------------- - - -def test_frame(data, all_arithmetic_operators): - op = get_op_from_name(all_arithmetic_operators) - - # DataFrame with scalar - df = pd.DataFrame({"A": data}) - scalar = 2 - - result = op(df, scalar) - expected = pd.DataFrame({"A": op(data, scalar)}) - tm.assert_frame_equal(result, expected) - - -def test_series(data, all_arithmetic_operators): - op = get_op_from_name(all_arithmetic_operators) - - s = pd.Series(data) - - # Series with scalar - scalar = 2 - result = op(s, scalar) - expected = pd.Series(op(data, scalar)) - tm.assert_series_equal(result, expected) - - # Series with np.ndarray - other = np.ones(len(data), dtype=data.dtype.type) - result = op(s, other) - expected = pd.Series(op(data, other)) - tm.assert_series_equal(result, expected) - - # Series with pd.array - other = pd.array(np.ones(len(data)), dtype=data.dtype) - result = op(s, other) - expected = pd.Series(op(data, other)) - tm.assert_series_equal(result, expected) - - # Series with Series - other = pd.Series(np.ones(len(data)), dtype=data.dtype) - result = op(s, other) - expected = pd.Series(op(data, other.array)) - tm.assert_series_equal(result, expected) - - -# Test generic charachteristics / errors +# Test generic characteristics / errors # ----------------------------------------------------------------------------- @@ -291,35 +182,6 @@ def test_error_invalid_values(data, all_arithmetic_operators): ops(pd.Series(pd.date_range("20180101", periods=len(s)))) -def test_error_invalid_object(data, all_arithmetic_operators): - - op = all_arithmetic_operators - opa = getattr(data, op) - - # 2d 
-> return NotImplemented - result = opa(pd.DataFrame({"A": data})) - assert result is NotImplemented - - msg = r"can only perform ops with 1-d structures" - with pytest.raises(NotImplementedError, match=msg): - opa(np.arange(len(data)).reshape(-1, len(data))) - - -def test_error_len_mismatch(all_arithmetic_operators): - # operating with a list-like with non-matching length raises - op = get_op_from_name(all_arithmetic_operators) - - data = pd.array([1, 2, 3], dtype="Int64") - - for other in [[1, 2], np.array([1.0, 2.0])]: - with pytest.raises(ValueError, match="Lengths must match"): - op(data, other) - - s = pd.Series(data) - with pytest.raises(ValueError, match="Lengths must match"): - op(s, other) - - # Various # ----------------------------------------------------------------------------- @@ -328,7 +190,7 @@ def test_error_len_mismatch(all_arithmetic_operators): def test_arith_coerce_scalar(data, all_arithmetic_operators): - op = get_op_from_name(all_arithmetic_operators) + op = tm.get_op_from_name(all_arithmetic_operators) s = pd.Series(data) other = 0.01 @@ -345,7 +207,7 @@ def test_arith_coerce_scalar(data, all_arithmetic_operators): def test_arithmetic_conversion(all_arithmetic_operators, other): # if we have a float operand we should have a float result # if that is equal to an integer - op = get_op_from_name(all_arithmetic_operators) + op = tm.get_op_from_name(all_arithmetic_operators) s = pd.Series([1, 2, 3], dtype="Int64") result = op(s, other) diff --git a/pandas/tests/arrays/masked/test_arithmetic.py b/pandas/tests/arrays/masked/test_arithmetic.py new file mode 100644 index 0000000000000..db938c36fe7ae --- /dev/null +++ b/pandas/tests/arrays/masked/test_arithmetic.py @@ -0,0 +1,158 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import ExtensionArray + +arrays = [pd.array([1, 2, 3, None], dtype=dtype) for dtype in tm.ALL_EA_INT_DTYPES] +scalars = [2] * len(arrays) +arrays += 
[pd.array([True, False, True, None], dtype="boolean")] +scalars += [False] + + +@pytest.fixture(params=zip(arrays, scalars), ids=[a.dtype.name for a in arrays]) +def data(request): + return request.param + + +def check_skip(data, op_name): + if isinstance(data.dtype, pd.BooleanDtype) and "sub" in op_name: + pytest.skip("subtract not implemented for boolean") + + +# Test equivalence of scalars, numpy arrays with array ops +# ----------------------------------------------------------------------------- + + +def test_array_scalar_like_equivalence(data, all_arithmetic_operators): + data, scalar = data + op = tm.get_op_from_name(all_arithmetic_operators) + check_skip(data, all_arithmetic_operators) + + scalar_array = pd.array([scalar] * len(data), dtype=data.dtype) + + # TODO also add len-1 array (np.array([scalar], dtype=data.dtype.numpy_dtype)) + for scalar in [scalar, data.dtype.type(scalar)]: + result = op(data, scalar) + expected = op(data, scalar_array) + if isinstance(expected, ExtensionArray): + tm.assert_extension_array_equal(result, expected) + else: + # TODO div still gives float ndarray -> remove this once we have Float EA + tm.assert_numpy_array_equal(result, expected) + + +def test_array_NA(data, all_arithmetic_operators): + if "truediv" in all_arithmetic_operators: + pytest.skip("division with pd.NA raises") + data, _ = data + op = tm.get_op_from_name(all_arithmetic_operators) + check_skip(data, all_arithmetic_operators) + + scalar = pd.NA + scalar_array = pd.array([pd.NA] * len(data), dtype=data.dtype) + + result = op(data, scalar) + expected = op(data, scalar_array) + tm.assert_extension_array_equal(result, expected) + + +def test_numpy_array_equivalence(data, all_arithmetic_operators): + data, scalar = data + op = tm.get_op_from_name(all_arithmetic_operators) + check_skip(data, all_arithmetic_operators) + + numpy_array = np.array([scalar] * len(data), dtype=data.dtype.numpy_dtype) + pd_array = pd.array(numpy_array, dtype=data.dtype) + + result = 
op(data, numpy_array) + expected = op(data, pd_array) + if isinstance(expected, ExtensionArray): + tm.assert_extension_array_equal(result, expected) + else: + # TODO div still gives float ndarray -> remove this once we have Float EA + tm.assert_numpy_array_equal(result, expected) + + +# Test equivalence with Series and DataFrame ops +# ----------------------------------------------------------------------------- + + +def test_frame(data, all_arithmetic_operators): + data, scalar = data + op = tm.get_op_from_name(all_arithmetic_operators) + check_skip(data, all_arithmetic_operators) + + # DataFrame with scalar + df = pd.DataFrame({"A": data}) + + result = op(df, scalar) + expected = pd.DataFrame({"A": op(data, scalar)}) + tm.assert_frame_equal(result, expected) + + +def test_series(data, all_arithmetic_operators): + data, scalar = data + op = tm.get_op_from_name(all_arithmetic_operators) + check_skip(data, all_arithmetic_operators) + + s = pd.Series(data) + + # Series with scalar + result = op(s, scalar) + expected = pd.Series(op(data, scalar)) + tm.assert_series_equal(result, expected) + + # Series with np.ndarray + other = np.array([scalar] * len(data), dtype=data.dtype.numpy_dtype) + result = op(s, other) + expected = pd.Series(op(data, other)) + tm.assert_series_equal(result, expected) + + # Series with pd.array + other = pd.array([scalar] * len(data), dtype=data.dtype) + result = op(s, other) + expected = pd.Series(op(data, other)) + tm.assert_series_equal(result, expected) + + # Series with Series + other = pd.Series([scalar] * len(data), dtype=data.dtype) + result = op(s, other) + expected = pd.Series(op(data, other.array)) + tm.assert_series_equal(result, expected) + + +# Test generic characteristics / errors +# ----------------------------------------------------------------------------- + + +def test_error_invalid_object(data, all_arithmetic_operators): + data, _ = data + + op = all_arithmetic_operators + opa = getattr(data, op) + + # 2d -> return 
NotImplemented + result = opa(pd.DataFrame({"A": data})) + assert result is NotImplemented + + msg = r"can only perform ops with 1-d structures" + with pytest.raises(NotImplementedError, match=msg): + opa(np.arange(len(data)).reshape(-1, len(data))) + + +def test_error_len_mismatch(data, all_arithmetic_operators): + # operating with a list-like with non-matching length raises + data, scalar = data + op = tm.get_op_from_name(all_arithmetic_operators) + + other = [scalar] * (len(data) - 1) + + for other in [other, np.array(other)]: + with pytest.raises(ValueError, match="Lengths must match"): + op(data, other) + + s = pd.Series(data) + with pytest.raises(ValueError, match="Lengths must match"): + op(s, other) diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py index 188893c8b067c..359acf230ce14 100644 --- a/pandas/tests/extension/base/ops.py +++ b/pandas/tests/extension/base/ops.py @@ -1,9 +1,9 @@ -import operator from typing import Optional, Type import pytest import pandas as pd +import pandas._testing as tm from pandas.core import ops from .base import BaseExtensionTests @@ -11,15 +11,7 @@ class BaseOpsUtil(BaseExtensionTests): def get_op_from_name(self, op_name): - short_opname = op_name.strip("_") - try: - op = getattr(operator, short_opname) - except AttributeError: - # Assume it is the reverse operator - rop = getattr(operator, short_opname[1:]) - op = lambda x, y: rop(y, x) - - return op + return tm.get_op_from_name(op_name) def check_opname(self, s, op_name, other, exc=Exception): op = self.get_op_from_name(op_name)
Follow-up on https://github.com/pandas-dev/pandas/pull/34454 - Move a bunch of tests from `integer/test_arithmetic.py` to `masked/test_arithmetic.py` so those can be shared with boolean (and once merged I can add float as well in the FloatingArray PR) - Added some more arithmetic tests to `boolean/test_arithmetic.py` to follow the same pattern as we have in `integer/test_arithmetic.py`. - The commong integer/boolean tests uncovered a bug in the BooleanArray implementation when dealing with a `pd.NA` scalar as other operand.
https://api.github.com/repos/pandas-dev/pandas/pulls/34623
2020-06-06T18:32:47Z
2020-06-14T15:18:10Z
2020-06-14T15:18:10Z
2020-06-14T15:18:13Z
TST: boolean indexing using .iloc #20627
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index c97cd81c84726..c5f40102874dd 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -705,6 +705,25 @@ def test_iloc_setitem_categorical_updates_inplace(self): expected = pd.Categorical(["C", "B", "A"]) tm.assert_categorical_equal(cat, expected) + def test_iloc_with_boolean_operation(self): + # GH 20627 + result = DataFrame([[0, 1], [2, 3], [4, 5], [6, np.nan]]) + result.iloc[result.index <= 2] *= 2 + expected = DataFrame([[0, 2], [4, 6], [8, 10], [6, np.nan]]) + tm.assert_frame_equal(result, expected) + + result.iloc[result.index > 2] *= 2 + expected = DataFrame([[0, 2], [4, 6], [8, 10], [12, np.nan]]) + tm.assert_frame_equal(result, expected) + + result.iloc[[True, True, False, False]] *= 2 + expected = DataFrame([[0, 4], [8, 12], [8, 10], [12, np.nan]]) + tm.assert_frame_equal(result, expected) + + result.iloc[[False, False, True, True]] /= 2 + expected = DataFrame([[0.0, 4.0], [8.0, 12.0], [4.0, 5.0], [6.0, np.nan]]) + tm.assert_frame_equal(result, expected) + class TestILocSetItemDuplicateColumns: def test_iloc_setitem_scalar_duplicate_columns(self):
- [x] closes #20627 - [1] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` A simple test was added to close this issue.
https://api.github.com/repos/pandas-dev/pandas/pulls/34622
2020-06-06T18:24:00Z
2020-06-13T19:58:21Z
2020-06-13T19:58:21Z
2021-01-02T08:31:59Z
DOC: docstring, closes #23475
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index e88a20bc549bd..5c890c7fbf59d 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1529,7 +1529,16 @@ cdef class _Period: self._dtype = PeriodPseudoDtype(freq._period_dtype_code) @classmethod - def _maybe_convert_freq(cls, object freq): + def _maybe_convert_freq(cls, object freq) -> BaseOffset: + """ + Internally we allow integer and tuple representations (for now) that + are not recognized by to_offset, so we convert them here. Also, a + Period's freq attribute must have `freq.n > 0`, which we check for here. + + Returns + ------- + DateOffset + """ if isinstance(freq, (int, tuple)): code, stride = get_freq_code(freq) freq = get_freq_str(code, stride)
- [x] closes #23475 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34619
2020-06-06T16:18:29Z
2020-06-07T20:11:13Z
2020-06-07T20:11:13Z
2020-06-07T20:34:38Z
Improve document for **kwargs argument of pandas.Series.to_markdown
diff --git a/pandas/core/series.py b/pandas/core/series.py index 6b5ed86027806..0ed29fcaaa081 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1422,6 +1422,21 @@ def to_string( | 1 | pig | | 2 | dog | | 3 | quetzal | + + Output markdown with a tabulate option. + + >>> print(s.to_markdown(tablefmt="grid")) + +----+----------+ + | | animal | + +====+==========+ + | 0 | elk | + +----+----------+ + | 1 | pig | + +----+----------+ + | 2 | dog | + +----+----------+ + | 3 | quetzal | + +----+----------+ """ ) @Substitution(klass="Series")
This is a follow-up PR of #34594 . `pandas.Series.to_markdown` has also same argument. This PR adds example for `pandas.Series.to_markdown` with tabulate option. Related Issue: #34568
https://api.github.com/repos/pandas-dev/pandas/pulls/34616
2020-06-06T05:38:54Z
2020-06-08T02:38:22Z
2020-06-08T02:38:22Z
2020-06-08T02:38:31Z
DOC: Clarify where to the additional arguments for some win_types
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index ce0a2a9b95025..9cd750265133e 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -922,16 +922,19 @@ class Window(_Window): * ``blackmanharris`` * ``nuttall`` * ``barthann`` - * ``kaiser`` (needs beta) - * ``gaussian`` (needs std) - * ``general_gaussian`` (needs power, width) - * ``slepian`` (needs width) - * ``exponential`` (needs tau), center is set to None. + * ``kaiser`` (needs parameter: beta) + * ``gaussian`` (needs parameter: std) + * ``general_gaussian`` (needs parameters: power, width) + * ``slepian`` (needs parameter: width) + * ``exponential`` (needs parameter: tau), center is set to None. If ``win_type=None`` all points are evenly weighted. To learn more about different window types see `scipy.signal window functions <https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__. + Certain window types require additional parameters to be passed. Please see + the third example below on how to add the additional parameters. + Examples -------- >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
For example, std needs to specify when win_types is gaussian. However, std should be specified in the operation argument, not as one of the rolling arguments. This change is to clarify this point. - [ ] closes #34593 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34615
2020-06-06T00:49:42Z
2020-06-18T00:11:58Z
2020-06-18T00:11:58Z
2020-06-18T00:12:10Z
CLN: update tslibs/tseries test locations/imports
diff --git a/pandas/tests/tseries/frequencies/test_freq_code.py b/pandas/tests/tseries/frequencies/test_freq_code.py index d4eb31168b20e..4df221913b805 100644 --- a/pandas/tests/tseries/frequencies/test_freq_code.py +++ b/pandas/tests/tseries/frequencies/test_freq_code.py @@ -1,6 +1,6 @@ import pytest -from pandas._libs.tslibs import to_offset +from pandas._libs.tslibs import Resolution, offsets, to_offset from pandas._libs.tslibs.frequencies import ( FreqGroup, _attrname_to_abbrevs, @@ -9,9 +9,6 @@ get_freq_group, get_to_timestamp_base, ) -from pandas._libs.tslibs.resolution import Resolution as _reso - -import pandas.tseries.offsets as offsets @pytest.fixture(params=list(_period_code_map.items())) @@ -103,19 +100,19 @@ def test_get_to_timestamp_base(freqstr, exp_freqstr): ], ) def test_get_attrname_from_abbrev(freqstr, expected): - assert _reso.get_reso_from_freq(freqstr).attrname == expected + assert Resolution.get_reso_from_freq(freqstr).attrname == expected @pytest.mark.parametrize("freq", ["A", "Q", "M"]) def test_get_freq_unsupported_(freq): # Lowest-frequency resolution is for Day with pytest.raises(KeyError, match=freq.lower()): - _reso.get_reso_from_freq(freq) + Resolution.get_reso_from_freq(freq) @pytest.mark.parametrize("freq", ["D", "H", "T", "S", "L", "U", "N"]) def test_get_freq_roundtrip2(freq): - obj = _reso.get_reso_from_freq(freq) + obj = Resolution.get_reso_from_freq(freq) result = _attrname_to_abbrevs[obj.attrname] assert freq == result diff --git a/pandas/tests/tseries/frequencies/test_frequencies.py b/pandas/tests/tseries/frequencies/test_frequencies.py new file mode 100644 index 0000000000000..0479de8e8e7c3 --- /dev/null +++ b/pandas/tests/tseries/frequencies/test_frequencies.py @@ -0,0 +1,26 @@ +import pytest + +from pandas._libs.tslibs import offsets + +from pandas.tseries.frequencies import is_subperiod, is_superperiod + + +@pytest.mark.parametrize( + "p1,p2,expected", + [ + # Input validation. 
+ (offsets.MonthEnd(), None, False), + (offsets.YearEnd(), None, False), + (None, offsets.YearEnd(), False), + (None, offsets.MonthEnd(), False), + (None, None, False), + (offsets.YearEnd(), offsets.MonthEnd(), True), + (offsets.Hour(), offsets.Minute(), True), + (offsets.Second(), offsets.Milli(), True), + (offsets.Milli(), offsets.Micro(), True), + (offsets.Micro(), offsets.Nano(), True), + ], +) +def test_super_sub_symmetry(p1, p2, expected): + assert is_superperiod(p1, p2) is expected + assert is_subperiod(p2, p1) is expected diff --git a/pandas/tests/tslibs/test_libfrequencies.py b/pandas/tests/tslibs/test_libfrequencies.py index 65d3b15bb3dac..feaaaf6adca6f 100644 --- a/pandas/tests/tslibs/test_libfrequencies.py +++ b/pandas/tests/tslibs/test_libfrequencies.py @@ -4,7 +4,6 @@ from pandas._libs.tslibs.parsing import get_rule_month from pandas.tseries import offsets -from pandas.tseries.frequencies import is_subperiod, is_superperiod # TODO: move tests @pytest.mark.parametrize( @@ -56,27 +55,6 @@ def test_period_str_to_code(obj, expected): assert _period_str_to_code(obj) == expected -@pytest.mark.parametrize( - "p1,p2,expected", - [ - # Input validation. 
- (offsets.MonthEnd(), None, False), - (offsets.YearEnd(), None, False), - (None, offsets.YearEnd(), False), - (None, offsets.MonthEnd(), False), - (None, None, False), - (offsets.YearEnd(), offsets.MonthEnd(), True), - (offsets.Hour(), offsets.Minute(), True), - (offsets.Second(), offsets.Milli(), True), - (offsets.Milli(), offsets.Micro(), True), - (offsets.Micro(), offsets.Nano(), True), - ], -) -def test_super_sub_symmetry(p1, p2, expected): - assert is_superperiod(p1, p2) is expected - assert is_subperiod(p2, p1) is expected - - @pytest.mark.parametrize( "freq,expected,aliases", [ diff --git a/pandas/tests/tseries/frequencies/test_to_offset.py b/pandas/tests/tslibs/test_to_offset.py similarity index 100% rename from pandas/tests/tseries/frequencies/test_to_offset.py rename to pandas/tests/tslibs/test_to_offset.py
https://api.github.com/repos/pandas-dev/pandas/pulls/34614
2020-06-05T23:08:30Z
2020-06-08T02:27:08Z
2020-06-08T02:27:07Z
2020-06-08T02:30:39Z
REF: RelativeDeltaOffset.apply_index operate on ndarray
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 63136367a5b5c..63a4a7ce302e8 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -556,7 +556,7 @@ cdef class BaseOffset: # ------------------------------------------------------------------ @apply_index_wraps - def apply_index(self, index): + def apply_index(self, dtindex): """ Vectorized apply of DateOffset to DatetimeIndex, raises NotImplementedError for offsets without a @@ -1028,7 +1028,7 @@ cdef class RelativeDeltaOffset(BaseOffset): return other + timedelta(self.n) @apply_index_wraps - def apply_index(self, index): + def apply_index(self, dtindex): """ Vectorized apply of DateOffset to DatetimeIndex, raises NotImplementedError for offsets without a @@ -1040,8 +1040,9 @@ cdef class RelativeDeltaOffset(BaseOffset): Returns ------- - DatetimeIndex + ndarray[datetime64[ns]] """ + dt64other = np.asarray(dtindex) kwds = self.kwds relativedelta_fast = { "years", @@ -1058,12 +1059,12 @@ cdef class RelativeDeltaOffset(BaseOffset): months = (kwds.get("years", 0) * 12 + kwds.get("months", 0)) * self.n if months: - shifted = shift_months(index.asi8, months) - index = type(index)(shifted, dtype=index.dtype) + shifted = shift_months(dt64other.view("i8"), months) + dt64other = shifted.view("datetime64[ns]") weeks = kwds.get("weeks", 0) * self.n if weeks: - index = index + timedelta(days=7 * weeks) + dt64other = dt64other + Timedelta(days=7 * weeks) timedelta_kwds = { k: v @@ -1072,11 +1073,11 @@ cdef class RelativeDeltaOffset(BaseOffset): } if timedelta_kwds: delta = Timedelta(**timedelta_kwds) - index = index + (self.n * delta) - return index + dt64other = dt64other + (self.n * delta) + return dt64other elif not self._use_relativedelta and hasattr(self, "_offset"): # timedelta - return index + (self._offset * self.n) + return dt64other + Timedelta(self._offset * self.n) else: # relativedelta with other keywords kwd = set(kwds) - relativedelta_fast
https://api.github.com/repos/pandas-dev/pandas/pulls/34613
2020-06-05T23:01:28Z
2020-06-08T16:15:52Z
2020-06-08T16:15:52Z
2020-06-09T16:36:52Z
REF: make DateOffset apply_index methods operate on ndarrays where feasible
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 63136367a5b5c..28ead3593cf85 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -47,6 +47,7 @@ from pandas._libs.tslibs.timezones cimport utc_pytz as UTC from pandas._libs.tslibs.tzconversion cimport tz_convert_single from .dtypes cimport PeriodDtypeCode +from .fields import get_start_end_field from .timedeltas cimport delta_to_nanoseconds from .timedeltas import Timedelta from .timestamps cimport _Timestamp @@ -2291,7 +2292,7 @@ cdef class SemiMonthOffset(SingleConstructorOffset): after_day_of_month = days_from_start > delta # determine the correct n for each date in dtindex - roll = self._get_roll(dtindex, before_day_of_month, after_day_of_month) + roll = self._get_roll(i8other, before_day_of_month, after_day_of_month) # isolate the time since it will be striped away one the next line time = (i8other % DAY_NANOS).view("timedelta64[ns]") @@ -2304,24 +2305,26 @@ cdef class SemiMonthOffset(SingleConstructorOffset): shifted = asper._addsub_int_array(roll // 2, operator.add) dtindex = type(dti)(shifted.to_timestamp()) + dt64other = np.asarray(dtindex) # apply the correct day - dtindex = self._apply_index_days(dtindex, roll) + dt64result = self._apply_index_days(dt64other, roll) - return dtindex + time + return dt64result + time - def _get_roll(self, dtindex, before_day_of_month, after_day_of_month): + def _get_roll(self, i8other, before_day_of_month, after_day_of_month): """ Return an array with the correct n for each date in dtindex. The roll array is based on the fact that dtindex gets rolled back to the first day of the month. """ + # before_day_of_month and after_day_of_month are ndarray[bool] raise NotImplementedError - def _apply_index_days(self, dtindex, roll): + def _apply_index_days(self, dt64other, roll): """ - Apply the correct day for each date in dtindex. + Apply the correct day for each date in dt64other. 
""" raise NotImplementedError @@ -2352,9 +2355,10 @@ cdef class SemiMonthEnd(SemiMonthOffset): day = 31 if n % 2 else self.day_of_month return shift_month(other, months, day) - def _get_roll(self, dtindex, before_day_of_month, after_day_of_month): + def _get_roll(self, i8other, before_day_of_month, after_day_of_month): + # before_day_of_month and after_day_of_month are ndarray[bool] n = self.n - is_month_end = dtindex.is_month_end + is_month_end = get_start_end_field(i8other, "is_month_end") if n > 0: roll_end = np.where(is_month_end, 1, 0) roll_before = np.where(before_day_of_month, n, n + 1) @@ -2367,22 +2371,22 @@ cdef class SemiMonthEnd(SemiMonthOffset): roll = np.where(after_day_of_month, n + 2, n + 1) return roll - def _apply_index_days(self, dtindex, roll): + def _apply_index_days(self, dt64other, roll): """ - Add days portion of offset to DatetimeIndex dtindex. + Add days portion of offset to dt64other. Parameters ---------- - dtindex : DatetimeIndex + dt64other : ndarray[datetime64[ns]] roll : ndarray[int64_t] Returns ------- - result : DatetimeIndex + ndarray[datetime64[ns]] """ nanos = (roll % 2) * Timedelta(days=self.day_of_month).value - dtindex += nanos.astype("timedelta64[ns]") - return dtindex + Timedelta(days=-1) + dt64other += nanos.astype("timedelta64[ns]") + return dt64other + Timedelta(days=-1) cdef class SemiMonthBegin(SemiMonthOffset): @@ -2409,9 +2413,10 @@ cdef class SemiMonthBegin(SemiMonthOffset): day = 1 if n % 2 else self.day_of_month return shift_month(other, months, day) - def _get_roll(self, dtindex, before_day_of_month, after_day_of_month): + def _get_roll(self, i8other, before_day_of_month, after_day_of_month): + # before_day_of_month and after_day_of_month are ndarray[bool] n = self.n - is_month_start = dtindex.is_month_start + is_month_start = get_start_end_field(i8other, "is_month_start") if n > 0: roll = np.where(before_day_of_month, n, n + 1) elif n == 0: @@ -2424,21 +2429,21 @@ cdef class SemiMonthBegin(SemiMonthOffset): roll 
= roll_after + roll_start return roll - def _apply_index_days(self, dtindex, roll): + def _apply_index_days(self, dt64other, roll): """ - Add days portion of offset to DatetimeIndex dtindex. + Add days portion of offset to dt64other. Parameters ---------- - dtindex : DatetimeIndex + dt64other : ndarray[datetime64[ns]] roll : ndarray[int64_t] Returns ------- - result : DatetimeIndex + ndarray[datetime64[ns]] """ nanos = (roll % 2) * Timedelta(days=self.day_of_month - 1).value - return dtindex + nanos.astype("timedelta64[ns]") + return dt64other + nanos.astype("timedelta64[ns]") # ---------------------------------------------------------------------
https://api.github.com/repos/pandas-dev/pandas/pulls/34612
2020-06-05T22:01:04Z
2020-06-08T01:59:48Z
2020-06-08T01:59:47Z
2020-06-08T02:05:24Z
Add ddof to cov methods
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 14d1e1b49a726..59c4a22dcefff 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -294,6 +294,7 @@ Other enhancements - :meth:`~pandas.io.json.read_json` now accepts `nrows` parameter. (:issue:`33916`). - :meth:`~pandas.io.gbq.read_gbq` now allows to disable progress bar (:issue:`33360`). - :meth:`~pandas.io.gbq.read_gbq` now supports the ``max_results`` kwarg from ``pandas-gbq`` (:issue:`34639`). +- :meth:`Dataframe.cov` and :meth:`Series.cov` now support a new parameter ddof to support delta degrees of freedom as in the corresponding numpy methods (:issue:`34611`). - :meth:`DataFrame.to_html` and :meth:`DataFrame.to_string`'s ``col_space`` parameter now accepts a list of dict to change only some specific columns' width (:issue:`28917`). .. --------------------------------------------------------------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 2c80f57e4ef5d..c23f7718e59fb 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8134,7 +8134,9 @@ def corr(self, method="pearson", min_periods=1) -> "DataFrame": return self._constructor(correl, index=idx, columns=cols) - def cov(self, min_periods=None) -> "DataFrame": + def cov( + self, min_periods: Optional[int] = None, ddof: Optional[int] = 1 + ) -> "DataFrame": """ Compute pairwise covariance of columns, excluding NA/null values. @@ -8159,6 +8161,12 @@ def cov(self, min_periods=None) -> "DataFrame": Minimum number of observations required per pair of columns to have a valid result. + ddof : int, default 1 + Delta degrees of freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + + versionadded:: 1.1.0 + Returns ------- DataFrame @@ -8174,7 +8182,7 @@ def cov(self, min_periods=None) -> "DataFrame": Notes ----- Returns the covariance matrix of the DataFrame's time series. - The covariance is normalized by N-1. 
+ The covariance is normalized by N-ddof. For DataFrames that have Series that are missing data (assuming that data is `missing at random @@ -8237,7 +8245,7 @@ def cov(self, min_periods=None) -> "DataFrame": base_cov = np.empty((mat.shape[1], mat.shape[1])) base_cov.fill(np.nan) else: - base_cov = np.cov(mat.T) + base_cov = np.cov(mat.T, ddof=ddof) base_cov = base_cov.reshape((len(cols), len(cols))) else: base_cov = libalgos.nancorr(mat, cov=True, minp=min_periods) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index e7e5e37bb7817..e7e28798d84a2 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -1383,7 +1383,12 @@ def func(a, b): @disallow("M8", "m8") -def nancov(a: np.ndarray, b: np.ndarray, min_periods: Optional[int] = None): +def nancov( + a: np.ndarray, + b: np.ndarray, + min_periods: Optional[int] = None, + ddof: Optional[int] = 1, +): if len(a) != len(b): raise AssertionError("Operands to nancov must have same size") @@ -1398,7 +1403,7 @@ def nancov(a: np.ndarray, b: np.ndarray, min_periods: Optional[int] = None): if len(a) < min_periods: return np.nan - return np.cov(a, b)[0, 1] + return np.cov(a, b, ddof=ddof)[0, 1] def _ensure_numeric(x): diff --git a/pandas/core/series.py b/pandas/core/series.py index a27e44efe1a97..87edcdeb05522 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2291,7 +2291,12 @@ def corr(self, other, method="pearson", min_periods=None) -> float: f"'{method}' was supplied" ) - def cov(self, other, min_periods=None) -> float: + def cov( + self, + other: "Series", + min_periods: Optional[int] = None, + ddof: Optional[int] = 1, + ) -> float: """ Compute covariance with Series, excluding missing values. @@ -2301,6 +2306,11 @@ def cov(self, other, min_periods=None) -> float: Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. + ddof : int, default 1 + Delta degrees of freedom. 
The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + + versionadded:: 1.1.0 Returns ------- @@ -2322,7 +2332,9 @@ def cov(self, other, min_periods=None) -> float: this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan - return nanops.nancov(this.values, other.values, min_periods=min_periods) + return nanops.nancov( + this.values, other.values, min_periods=min_periods, ddof=ddof + ) @doc( klass="Series", diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py index 7d75db55c3073..d3548b639572d 100644 --- a/pandas/tests/frame/methods/test_cov_corr.py +++ b/pandas/tests/frame/methods/test_cov_corr.py @@ -58,6 +58,17 @@ def test_cov(self, float_frame, float_string_frame): ) tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize("test_ddof", [None, 0, 1, 2, 3]) + def test_cov_ddof(self, test_ddof): + # GH#34611 + np_array1 = np.random.rand(10) + np_array2 = np.random.rand(10) + df = DataFrame({0: np_array1, 1: np_array2}) + result = df.cov(ddof=test_ddof) + expected_np = np.cov(np_array1, np_array2, ddof=test_ddof) + expected = DataFrame(expected_np) + tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize( "other_column", [pd.array([1, 2, 3]), np.array([1.0, 2.0, 3.0])] ) diff --git a/pandas/tests/series/methods/test_cov_corr.py b/pandas/tests/series/methods/test_cov_corr.py index 1f6033d435323..282f499506aae 100644 --- a/pandas/tests/series/methods/test_cov_corr.py +++ b/pandas/tests/series/methods/test_cov_corr.py @@ -1,3 +1,5 @@ +import math + import numpy as np import pytest @@ -36,6 +38,19 @@ def test_cov(self, datetime_series): ts2 = datetime_series[5:].reindex(datetime_series.index) assert isna(ts1.cov(ts2, min_periods=12)) + @pytest.mark.parametrize("test_ddof", [None, 0, 1, 2, 3]) + def test_cov_ddof(self, test_ddof): + # GH#34611 + np_array1 = np.random.rand(10) + np_array2 = np.random.rand(10) + + s1 = 
Series(np_array1) + s2 = Series(np_array2) + + result = s1.cov(s2, ddof=test_ddof) + expected = np.cov(np_array1, np_array2, ddof=test_ddof)[0][1] + assert math.isclose(expected, result) + class TestSeriesCorr: @td.skip_if_no_scipy
- [ x] closes #28823 - [x] tests added / passed - [x] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34611
2020-06-05T20:57:50Z
2020-06-23T17:37:58Z
2020-06-23T17:37:58Z
2020-06-23T17:38:04Z
TST: mark tzlocal tests as slow, closes #34413
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index 9909e554aa14d..8d7d45f54ad5f 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -2,6 +2,7 @@ from functools import partial from io import StringIO +from dateutil.tz import tzlocal import numpy as np import pytest import pytz @@ -480,6 +481,11 @@ def test_upsample_with_limit(): @pytest.mark.parametrize("rule", ["Y", "3M", "15D", "30H", "15Min", "30S"]) def test_nearest_upsample_with_limit(tz_aware_fixture, freq, rule): # GH 33939 + tz = tz_aware_fixture + if str(tz) == "tzlocal()" and rule == "30S" and freq in ["Y", "10M"]: + # GH#34413 separate these so we can mark as slow, see + # test_nearest_upsample_with_limit_tzlocal + return rng = date_range("1/1/2000", periods=3, freq=freq, tz=tz_aware_fixture) ts = Series(np.random.randn(len(rng)), rng) @@ -488,6 +494,20 @@ def test_nearest_upsample_with_limit(tz_aware_fixture, freq, rule): tm.assert_series_equal(result, expected) +@pytest.mark.slow +@pytest.mark.parametrize("freq", ["Y", "10M"]) +def test_nearest_upsample_with_limit_tzlocal(freq): + # GH#33939, GH#34413 split off from test_nearest_upsample_with_limit + rule = "30S" + tz = tzlocal() + rng = date_range("1/1/2000", periods=3, freq=freq, tz=tz) + ts = Series(np.random.randn(len(rng)), rng) + + result = ts.resample(rule).nearest(limit=2) + expected = ts.reindex(result.index, method="nearest", limit=2) + tm.assert_series_equal(result, expected) + + def test_resample_ohlc(series): s = series
- [x] closes #34413 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34610
2020-06-05T20:50:23Z
2020-06-08T02:01:41Z
2020-06-08T02:01:41Z
2020-06-08T02:05:00Z
PERF: isinstance ABCIndexClass and ABCExtensionArray
diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py index 09b039e317424..36eff214fc314 100644 --- a/pandas/core/dtypes/generic.py +++ b/pandas/core/dtypes/generic.py @@ -38,7 +38,7 @@ def _check(cls, inst) -> bool: ABCIndexClass = create_pandas_abc_type( "ABCIndexClass", "_typ", - ( + { "index", "int64index", "rangeindex", @@ -50,7 +50,7 @@ def _check(cls, inst) -> bool: "periodindex", "categoricalindex", "intervalindex", - ), + }, ) ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series",)) @@ -66,6 +66,6 @@ def _check(cls, inst) -> bool: "ABCExtensionArray", "_typ", # Note: IntervalArray and SparseArray are included bc they have _typ="extension" - ("extension", "categorical", "periodarray", "datetimearray", "timedeltaarray"), + {"extension", "categorical", "periodarray", "datetimearray", "timedeltaarray"}, ) ABCPandasArray = create_pandas_abc_type("ABCPandasArray", "_typ", ("npy_extension",))
``` %timeit isinstance(pd.Series, pd.core.dtypes.generic.ABCIndexClass) # 607 ns ± 12.4 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each) - PR # 937 ns ± 37.5 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each) - Master %timeit isinstance(pd.Series, pd.core.dtypes.generic.ABCExtensionArray) # 634 ns ± 26.9 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each) - PR # 759 ns ± 40.6 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each) - Master %timeit isinstance(pd.Series, pd.core.dtypes.generic.ABCSeries) # 638 ns ± 49.1 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each) - PR # 644 ns ± 20.1 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each) - Master ```
https://api.github.com/repos/pandas-dev/pandas/pulls/34607
2020-06-05T19:02:18Z
2020-06-06T06:33:09Z
2020-06-06T06:33:09Z
2020-06-06T09:16:17Z
#34569 Added proper description for pandas.Series.pop
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d12ebeafe8510..1872f34dfcd7f 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4263,6 +4263,49 @@ def fillna( downcast=downcast, ) + def pop(self, item: Label) -> Series: + """ + Return item and drop from frame. Raise KeyError if not found. + + Parameters + ---------- + item : label + Label of column to be popped. + + Returns + ------- + Series + + Examples + -------- + >>> df = pd.DataFrame([('falcon', 'bird', 389.0), + ... ('parrot', 'bird', 24.0), + ... ('lion', 'mammal', 80.5), + ... ('monkey', 'mammal', np.nan)], + ... columns=('name', 'class', 'max_speed')) + >>> df + name class max_speed + 0 falcon bird 389.0 + 1 parrot bird 24.0 + 2 lion mammal 80.5 + 3 monkey mammal NaN + + >>> df.pop('class') + 0 bird + 1 bird + 2 mammal + 3 mammal + Name: class, dtype: object + + >>> df + name max_speed + 0 falcon 389.0 + 1 parrot 24.0 + 2 lion 80.5 + 3 monkey NaN + """ + return super().pop(item=item) + @doc(NDFrame.replace, **_shared_doc_kwargs) def replace( self, diff --git a/pandas/core/generic.py b/pandas/core/generic.py index bb2810ba7857f..fa92f702f07f5 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -105,6 +105,7 @@ if TYPE_CHECKING: from pandas.core.resample import Resampler + from pandas.core.series import Series # noqa: F401 # goal is to be able to define the docs close to function, while still being # able to share @@ -657,47 +658,7 @@ def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries: result = self.set_axis(new_labels, axis=axis, inplace=False) return result - def pop(self: FrameOrSeries, item) -> FrameOrSeries: - """ - Return item and drop from frame. Raise KeyError if not found. - - Parameters - ---------- - item : str - Label of column to be popped. - - Returns - ------- - Series - - Examples - -------- - >>> df = pd.DataFrame([('falcon', 'bird', 389.0), - ... ('parrot', 'bird', 24.0), - ... ('lion', 'mammal', 80.5), - ... 
('monkey', 'mammal', np.nan)], - ... columns=('name', 'class', 'max_speed')) - >>> df - name class max_speed - 0 falcon bird 389.0 - 1 parrot bird 24.0 - 2 lion mammal 80.5 - 3 monkey mammal NaN - - >>> df.pop('class') - 0 bird - 1 bird - 2 mammal - 3 mammal - Name: class, dtype: object - - >>> df - name max_speed - 0 falcon 389.0 - 1 parrot 24.0 - 2 lion 80.5 - 3 monkey NaN - """ + def pop(self, item: Label) -> Union["Series", Any]: result = self[item] del self[item] if self.ndim == 2: @@ -5396,7 +5357,7 @@ def dtypes(self): string object dtype: object """ - from pandas import Series + from pandas import Series # noqa: F811 return Series(self._mgr.get_dtypes(), index=self._info_axis, dtype=np.object_) diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 7d22b86c5c07c..845f6b67693f4 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -1,5 +1,5 @@ import re -from typing import List +from typing import TYPE_CHECKING, List, cast import numpy as np @@ -16,6 +16,9 @@ from pandas.core.reshape.concat import concat from pandas.core.tools.numeric import to_numeric +if TYPE_CHECKING: + from pandas import Series # noqa: F401 + @Appender( _shared_docs["melt"] @@ -106,7 +109,7 @@ def melt( for col in id_vars: id_data = frame.pop(col) if is_extension_array_dtype(id_data): - id_data = concat([id_data] * K, ignore_index=True) + id_data = cast("Series", concat([id_data] * K, ignore_index=True)) else: id_data = np.tile(id_data._values, K) mdata[col] = id_data diff --git a/pandas/core/series.py b/pandas/core/series.py index cab8dd133b579..e8c72125e9998 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4492,6 +4492,33 @@ def fillna( downcast=downcast, ) + def pop(self, item: Label) -> Any: + """ + Return item and drops from series. Raise KeyError if not found. + + Parameters + ---------- + item : label + Index of the element that needs to be removed. + + Returns + ------- + Value that is popped from series. 
+ + Examples + -------- + >>> ser = pd.Series([1,2,3]) + + >>> ser.pop(0) + 1 + + >>> ser + 1 2 + 2 3 + dtype: int64 + """ + return super().pop(item=item) + @doc(NDFrame.replace, klass=_shared_doc_kwargs["klass"]) def replace( self,
- [x] closes #34569 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Updated documentation will look like this: https://i.imgur.com/Tl9JO5X.png This is my first commit to the project. If any changes/modifications let me know.
https://api.github.com/repos/pandas-dev/pandas/pulls/34606
2020-06-05T18:35:17Z
2020-06-23T17:35:08Z
2020-06-23T17:35:08Z
2020-06-23T17:35:15Z
DOC: updated io/pytables.py to fix PR08
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 36cd61b6c3adb..497b25d73df3e 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -997,12 +997,14 @@ def put( key : str value : {Series, DataFrame} format : 'fixed(f)|table(t)', default is 'fixed' - fixed(f) : Fixed format - Fast writing/reading. Not-appendable, nor searchable. - table(t) : Table format - Write as a PyTables Table structure which may perform - worse but allow more flexible operations like searching - / selecting subsets of the data. + Format to use when storing object in HDFStore. Value can be one of: + + ``'fixed'`` + Fixed format. Fast writing/reading. Not-appendable, nor searchable. + ``'table'`` + Table format. Write as a PyTables Table structure which may perform + worse but allow more flexible operations like searching / selecting + subsets of the data. append : bool, default False This will force Table format, append the input data to the existing. @@ -1126,10 +1128,12 @@ def append( key : str value : {Series, DataFrame} format : 'table' is the default - table(t) : table format - Write as a PyTables Table structure which may perform - worse but allow more flexible operations like searching - / selecting subsets of the data. + Format to use when storing object in HDFStore. Value can be one of: + + ``'table'`` + Table format. Write as a PyTables Table structure which may perform + worse but allow more flexible operations like searching / selecting + subsets of the data. append : bool, default True Append the input data to the existing. data_columns : list of columns, or True, default None
https://api.github.com/repos/pandas-dev/pandas/pulls/34604
2020-06-05T16:50:48Z
2020-06-08T01:52:30Z
2020-06-08T01:52:30Z
2020-06-08T01:52:36Z
DOC: Fixed docstring in Series .isin() method
diff --git a/pandas/core/series.py b/pandas/core/series.py index 6b5ed86027806..ef47e52151961 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4513,7 +4513,7 @@ def memory_usage(self, index=True, deep=False): def isin(self, values) -> "Series": """ - Check whether `values` are contained in Series. + Whether elements in Series are contained in `values`. Return a boolean Series showing whether each element in the Series matches an element in the passed sequence of `values` exactly.
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34600
2020-06-05T14:50:52Z
2020-06-07T10:58:55Z
2020-06-07T10:58:55Z
2020-06-07T10:59:10Z
BUG: Pandas changes dtypes of columns when no float (or other) assignments are done to this column #34573
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index e5e0b2577d595..ee2a47f056a1f 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -896,6 +896,7 @@ Indexing - Bug in :meth:`DataFrame.truncate` and :meth:`Series.truncate` where index was assumed to be monotone increasing (:issue:`33756`) - Indexing with a list of strings representing datetimes failed on :class:`DatetimeIndex` or :class:`PeriodIndex`(:issue:`11278`) - Bug in :meth:`Series.at` when used with a :class:`MultiIndex` would raise an exception on valid inputs (:issue:`26989`) +- Bug in :meth:`DataFrame.loc` with dictionary of values changes columns with dtype of ``int`` to ``float`` (:issue:`34573`) - Bug in :meth:`Series.loc` when used with a :class:`MultiIndex` would raise an IndexingError when accessing a None value (:issue:`34318`) Missing diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index ab515cb5e606b..326bd00270eca 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1836,7 +1836,10 @@ def _setitem_with_indexer_missing(self, indexer, value): # append a Series value = value.reindex(index=self.obj.columns, copy=True) value.name = indexer - + elif isinstance(value, dict): + value = Series( + value, index=self.obj.columns, name=indexer, dtype=object + ) else: # a list-list if is_list_like_indexer(value): diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index d53665539309c..8fcdae95fbab5 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -126,3 +126,27 @@ def test_setitem_with_unaligned_sparse_value(self): df["new_column"] = sp_series expected = Series(SparseArray([1, 0, 0]), name="new_column") tm.assert_series_equal(df["new_column"], expected) + + def test_setitem_dict_preserves_dtypes(self): + # https://github.com/pandas-dev/pandas/issues/34573 + expected = DataFrame( + { + "a": Series([0, 1, 
2], dtype="int64"), + "b": Series([1, 2, 3], dtype=float), + "c": Series([1, 2, 3], dtype=float), + } + ) + df = DataFrame( + { + "a": Series([], dtype="int64"), + "b": Series([], dtype=float), + "c": Series([], dtype=float), + } + ) + for idx, b in enumerate([1, 2, 3]): + df.loc[df.shape[0]] = { + "a": int(idx), + "b": float(b), + "c": float(b), + } + tm.assert_frame_equal(df, expected)
- [ ] closes #34573 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34599
2020-06-05T13:28:05Z
2020-06-09T22:31:41Z
2020-06-09T22:31:41Z
2020-06-10T08:04:13Z
BUG: fix Series.where(cond) when cond is empty
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index b4e29291bb12d..be4f6a3a3098f 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1003,6 +1003,7 @@ Reshaping - Bug in :func:`cut` raised an error when non-unique labels (:issue:`33141`) - Ensure only named functions can be used in :func:`eval()` (:issue:`32460`) - Fixed bug in :func:`melt` where melting MultiIndex columns with ``col_level`` > 0 would raise a ``KeyError`` on ``id_vars`` (:issue:`34129`) +- Bug in :meth:`Series.where` with an empty Series and empty ``cond`` having non-bool dtype (:issue:`34592`) Sparse ^^^^^^ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 197696f8ed4fe..b642ed436444c 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -8826,16 +8826,17 @@ def _where( msg = "Boolean array expected for the condition, not {dtype}" - if not isinstance(cond, ABCDataFrame): - # This is a single-dimensional object. - if not is_bool_dtype(cond): - raise ValueError(msg.format(dtype=cond.dtype)) - elif not cond.empty: - for dt in cond.dtypes: - if not is_bool_dtype(dt): - raise ValueError(msg.format(dtype=dt)) + if not cond.empty: + if not isinstance(cond, ABCDataFrame): + # This is a single-dimensional object. 
+ if not is_bool_dtype(cond): + raise ValueError(msg.format(dtype=cond.dtype)) + else: + for dt in cond.dtypes: + if not is_bool_dtype(dt): + raise ValueError(msg.format(dtype=dt)) else: - # GH#21947 we have an empty DataFrame, could be object-dtype + # GH#21947 we have an empty DataFrame/Series, could be object-dtype cond = cond.astype(bool) cond = -cond if inplace else cond diff --git a/pandas/tests/series/indexing/test_where.py b/pandas/tests/series/indexing/test_where.py index 6765d9f9d8266..8daea84492871 100644 --- a/pandas/tests/series/indexing/test_where.py +++ b/pandas/tests/series/indexing/test_where.py @@ -443,3 +443,10 @@ def test_where_sparse(): result = ser.where(ser >= 2, 0) expected = pd.Series(pd.arrays.SparseArray([0, 2])) tm.assert_series_equal(result, expected) + + +def test_where_empty_series_and_empty_cond_having_non_bool_dtypes(): + # https://github.com/pandas-dev/pandas/issues/34592 + ser = Series([], dtype=float) + result = ser.where([]) + tm.assert_series_equal(result, ser)
- [ ] closes #34592 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry xref #21947 for fix for DataFrame.where
https://api.github.com/repos/pandas-dev/pandas/pulls/34595
2020-06-05T10:13:34Z
2020-06-08T02:15:45Z
2020-06-08T02:15:44Z
2020-06-08T08:31:23Z
Improve document for **kwargs argument of pandas.DataFrame.to_markdown
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5f8ab8966c1f0..b5941d343c5cb 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2196,6 +2196,17 @@ def to_feather(self, path, **kwargs) -> None: |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal | + + Output markdown with a tabulate option. + + >>> print(df.to_markdown(tablefmt="grid")) + +----+------------+------------+ + | | animal_1 | animal_2 | + +====+============+============+ + | 0 | elk | dog | + +----+------------+------------+ + | 1 | pig | quetzal | + +----+------------+------------+ """ ) @Substitution(klass="DataFrame") diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 4b074924baaf2..15a84dc05d12a 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1917,7 +1917,8 @@ def _repr_data_resource_(self): mode : str, optional Mode in which file is opened. **kwargs - These parameters will be passed to `tabulate`. + These parameters will be passed to `tabulate \ + <https://pypi.org/project/tabulate>`_. Returns -------
- [x] closes #34568 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34594
2020-06-05T08:48:21Z
2020-06-07T20:59:52Z
2020-06-07T20:59:52Z
2020-06-07T21:00:00Z
DOC: updated base.py and datetimes.py in core/indexes for PR08
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 746fd140e48a1..240882e561bc6 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -517,7 +517,7 @@ def is_(self, other) -> bool: Parameters ---------- other : object - other object to compare against. + Other object to compare against. Returns ------- diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 68c55426294ef..e1f0221eaee65 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -721,9 +721,9 @@ def indexer_at_time(self, time, asof=False): Parameters ---------- time : datetime.time or str - datetime.time or string in appropriate format ("%H:%M", "%H%M", - "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", - "%I%M%S%p"). + Time passed in either as object (datetime.time) or as string in + appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p", + "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p"). Returns ------- @@ -762,9 +762,9 @@ def indexer_between_time( Parameters ---------- start_time, end_time : datetime.time, str - datetime.time or string in appropriate format ("%H:%M", "%H%M", - "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", - "%I%M%S%p"). + Time passed either as object (datetime.time) or as string in + appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p", + "%H:%M:%S", "%H%M%S", "%I:%M:%S%p","%I%M%S%p"). include_start : bool, default True include_end : bool, default True
https://api.github.com/repos/pandas-dev/pandas/pulls/34591
2020-06-05T05:07:34Z
2020-06-07T21:23:19Z
2020-06-07T21:23:19Z
2020-06-07T21:23:30Z
REF: mix PeriodPseudoDtype into PeriodDtype
diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py index 25e2d8ba477e0..6dbb4ce7bc974 100644 --- a/pandas/_libs/tslibs/__init__.py +++ b/pandas/_libs/tslibs/__init__.py @@ -1,4 +1,5 @@ __all__ = [ + "dtypes", "localize_pydatetime", "NaT", "NaTType", @@ -17,7 +18,7 @@ "to_offset", ] - +from . import dtypes # type: ignore from .conversion import localize_pydatetime from .nattype import NaT, NaTType, iNaT, is_null_datetimelike, nat_strings from .np_datetime import OutOfBoundsDatetime diff --git a/pandas/_libs/tslibs/dtypes.pxd b/pandas/_libs/tslibs/dtypes.pxd index 23c473726e5a9..b6373550b1c78 100644 --- a/pandas/_libs/tslibs/dtypes.pxd +++ b/pandas/_libs/tslibs/dtypes.pxd @@ -50,7 +50,9 @@ cdef enum PeriodDtypeCode: U = 11000 # Microsecondly N = 12000 # Nanosecondly + UNDEFINED = -10_000 -cdef class PeriodPseudoDtype: + +cdef class PeriodDtypeBase: cdef readonly: PeriodDtypeCode dtype_code diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx index d0d4e579a456b..047f942178179 100644 --- a/pandas/_libs/tslibs/dtypes.pyx +++ b/pandas/_libs/tslibs/dtypes.pyx @@ -2,7 +2,7 @@ # originals -cdef class PeriodPseudoDtype: +cdef class PeriodDtypeBase: """ Similar to an actual dtype, this contains all of the information describing a PeriodDtype in an integer code. 
@@ -14,9 +14,9 @@ cdef class PeriodPseudoDtype: self.dtype_code = code def __eq__(self, other): - if not isinstance(other, PeriodPseudoDtype): + if not isinstance(other, PeriodDtypeBase): return False - if not isinstance(self, PeriodPseudoDtype): + if not isinstance(self, PeriodDtypeBase): # cython semantics, this is a reversed op return False return self.dtype_code == other.dtype_code diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 28ead3593cf85..0967fa00e9e62 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -2528,12 +2528,10 @@ cdef class Week(SingleConstructorOffset): ------- result : DatetimeIndex """ - from .frequencies import get_freq_code # TODO: avoid circular import - i8other = dtindex.asi8 off = (i8other % DAY_NANOS).view("timedelta64[ns]") - base, mult = get_freq_code(self.freqstr) + base = self._period_dtype_code base_period = dtindex.to_period(base) if self.n > 0: diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 5c890c7fbf59d..a436bd7f0c9ed 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -56,7 +56,7 @@ from pandas._libs.tslibs.ccalendar cimport ( ) from pandas._libs.tslibs.ccalendar cimport c_MONTH_NUMBERS -from pandas._libs.tslibs.dtypes cimport PeriodPseudoDtype +from pandas._libs.tslibs.dtypes cimport PeriodDtypeBase from pandas._libs.tslibs.frequencies cimport ( attrname_to_abbrevs, @@ -1517,7 +1517,7 @@ cdef class _Period: cdef readonly: int64_t ordinal - PeriodPseudoDtype _dtype + PeriodDtypeBase _dtype BaseOffset freq def __cinit__(self, int64_t ordinal, BaseOffset freq): @@ -1526,7 +1526,7 @@ cdef class _Period: # Note: this is more performant than PeriodDtype.from_date_offset(freq) # because from_date_offset cannot be made a cdef method (until cython # supported cdef classmethods) - self._dtype = PeriodPseudoDtype(freq._period_dtype_code) + self._dtype = PeriodDtypeBase(freq._period_dtype_code) 
@classmethod def _maybe_convert_freq(cls, object freq) -> BaseOffset: @@ -2463,7 +2463,7 @@ class Period(_Period): raise ValueError(msg) if ordinal is None: - base, mult = get_freq_code(freq) + base, _ = get_freq_code(freq) ordinal = period_ordinal(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond, 0, base) diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 1b8a0b2780a7d..b16a3df003512 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -52,7 +52,7 @@ def _field_accessor(name: str, docstring=None): def f(self): - base, _ = libfrequencies.get_freq_code(self.freq) + base = self.freq._period_dtype_code result = get_period_field_arr(name, self.asi8, base) return result @@ -440,12 +440,12 @@ def to_timestamp(self, freq=None, how="start"): return (self + self.freq).to_timestamp(how="start") - adjust if freq is None: - base, mult = libfrequencies.get_freq_code(self.freq) + base = self.freq._period_dtype_code freq = libfrequencies.get_to_timestamp_base(base) else: freq = Period._maybe_convert_freq(freq) - base, mult = libfrequencies.get_freq_code(freq) + base, _ = libfrequencies.get_freq_code(freq) new_data = self.asfreq(freq, how=how) new_data = libperiod.periodarr_to_dt64arr(new_data.asi8, base) @@ -523,14 +523,14 @@ def asfreq(self, freq=None, how: str = "E") -> "PeriodArray": freq = Period._maybe_convert_freq(freq) - base1, mult1 = libfrequencies.get_freq_code(self.freq) - base2, mult2 = libfrequencies.get_freq_code(freq) + base1 = self.freq._period_dtype_code + base2 = freq._period_dtype_code asi8 = self.asi8 - # mult1 can't be negative or 0 + # self.freq.n can't be negative or 0 end = how == "E" if end: - ordinal = asi8 + mult1 - 1 + ordinal = asi8 + self.freq.n - 1 else: ordinal = asi8 @@ -950,7 +950,7 @@ def dt64arr_to_periodarr(data, freq, tz=None): if isinstance(data, (ABCIndexClass, ABCSeries)): data = data._values - base, mult = libfrequencies.get_freq_code(freq) + base = 
freq._period_dtype_code return libperiod.dt64arr_to_periodarr(data.view("i8"), base, tz), freq diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 84284c581c9e5..b9d16ac5959e3 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -20,7 +20,7 @@ import pytz from pandas._libs.interval import Interval -from pandas._libs.tslibs import NaT, Period, Timestamp, timezones, to_offset +from pandas._libs.tslibs import NaT, Period, Timestamp, dtypes, timezones, to_offset from pandas._libs.tslibs.offsets import BaseOffset from pandas._typing import DtypeObj, Ordered @@ -848,7 +848,7 @@ def __setstate__(self, state) -> None: @register_extension_dtype -class PeriodDtype(PandasExtensionDtype): +class PeriodDtype(dtypes.PeriodDtypeBase, PandasExtensionDtype): """ An ExtensionDtype for Period data. @@ -896,7 +896,8 @@ def __new__(cls, freq=None): elif freq is None: # empty constructor for pickle compat - u = object.__new__(cls) + # -10_000 corresponds to PeriodDtypeCode.UNDEFINED + u = dtypes.PeriodDtypeBase.__new__(cls, -10_000) u._freq = None return u @@ -906,11 +907,15 @@ def __new__(cls, freq=None): try: return cls._cache[freq.freqstr] except KeyError: - u = object.__new__(cls) + dtype_code = freq._period_dtype_code + u = dtypes.PeriodDtypeBase.__new__(cls, dtype_code) u._freq = freq cls._cache[freq.freqstr] = u return u + def __reduce__(self): + return type(self), (self.freq,) + @property def freq(self): """ @@ -977,7 +982,7 @@ def __eq__(self, other: Any) -> bool: return isinstance(other, PeriodDtype) and self.freq == other.freq def __setstate__(self, state): - # for pickle compat. __get_state__ is defined in the + # for pickle compat. 
__getstate__ is defined in the # PandasExtensionDtype superclass and uses the public properties to # pickle -> need to set the settable private ones here (see GH26067) self._freq = state["freq"] diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py index 475452c71db58..a9cca32271b9f 100644 --- a/pandas/plotting/_matplotlib/timeseries.py +++ b/pandas/plotting/_matplotlib/timeseries.py @@ -6,7 +6,7 @@ import numpy as np from pandas._libs.tslibs import Period, to_offset -from pandas._libs.tslibs.frequencies import FreqGroup, base_and_stride, get_freq_code +from pandas._libs.tslibs.frequencies import FreqGroup, base_and_stride from pandas._typing import FrameOrSeriesUnion from pandas.core.dtypes.generic import ( @@ -213,7 +213,7 @@ def _use_dynamic_x(ax, data: "FrameOrSeriesUnion") -> bool: # FIXME: hack this for 0.10.1, creating more technical debt...sigh if isinstance(data.index, ABCDatetimeIndex): - base = get_freq_code(freq)[0] + base = to_offset(freq)._period_dtype_code x = data.index if base <= FreqGroup.FR_DAY: return x[:1].is_normalized diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 2684aa2c590d9..3b9d3dc0b91f6 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -67,7 +67,11 @@ def test_pickle(self, dtype): # force back to the cache result = tm.round_trip_pickle(dtype) - assert not len(dtype._cache) + if not isinstance(dtype, PeriodDtype): + # Because PeriodDtype has a cython class as a base class, + # it has different pickle semantics, and its cache is re-populated + # on un-pickling. + assert not len(dtype._cache) assert result == dtype
This will allow us to start getting rid of `get_freq_code` usages and related libfrequencies functions that are unnecessarily roundabout.
https://api.github.com/repos/pandas-dev/pandas/pulls/34590
2020-06-05T04:37:11Z
2020-06-08T16:17:25Z
2020-06-08T16:17:25Z
2020-06-08T16:25:09Z
DataFrame.truncate drops MultiIndex names
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 7834e1a5c4898..5ad6ad73ae550 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -911,6 +911,7 @@ MultiIndex df.loc[(['b', 'a'], [2, 1]), :] - Bug in :meth:`MultiIndex.intersection` was not guaranteed to preserve order when ``sort=False``. (:issue:`31325`) +- Bug in :meth:`DataFrame.truncate` was dropping :class:`MultiIndex` names. (:issue:`34564`) .. ipython:: python diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index fc2d4cf4621c4..4293fd04a5093 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -3195,7 +3195,12 @@ def truncate(self, before=None, after=None): new_codes = [level_codes[left:right] for level_codes in self.codes] new_codes[0] = new_codes[0] - i - return MultiIndex(levels=new_levels, codes=new_codes, verify_integrity=False) + return MultiIndex( + levels=new_levels, + codes=new_codes, + names=self._names, + verify_integrity=False, + ) def equals(self, other) -> bool: """ diff --git a/pandas/tests/frame/methods/test_truncate.py b/pandas/tests/frame/methods/test_truncate.py index 768a5f22fb063..674f482c478a0 100644 --- a/pandas/tests/frame/methods/test_truncate.py +++ b/pandas/tests/frame/methods/test_truncate.py @@ -104,3 +104,16 @@ def test_truncate_decreasing_index(self, before, after, indices, klass): result = values.truncate(before=before, after=after) expected = values.loc[indices] tm.assert_frame_equal(result, expected) + + def test_truncate_multiindex(self): + # GH 34564 + mi = pd.MultiIndex.from_product([[1, 2, 3, 4], ["A", "B"]], names=["L1", "L2"]) + s1 = pd.DataFrame(range(mi.shape[0]), index=mi, columns=["col"]) + result = s1.truncate(before=2, after=3) + + df = pd.DataFrame.from_dict( + {"L1": [2, 2, 3, 3], "L2": ["A", "B", "A", "B"], "col": [2, 3, 4, 5]} + ) + expected = df.set_index(["L1", "L2"]) + + tm.assert_frame_equal(result, expected) diff --git 
a/pandas/tests/indexes/multi/test_analytics.py b/pandas/tests/indexes/multi/test_analytics.py index 154ed22214830..9e4e73e793bac 100644 --- a/pandas/tests/indexes/multi/test_analytics.py +++ b/pandas/tests/indexes/multi/test_analytics.py @@ -30,7 +30,8 @@ def test_groupby(idx): tm.assert_dict_equal(groups, exp) -def test_truncate(): +def test_truncate_multiindex(): + # GH 34564 for MultiIndex level names check major_axis = Index(list(range(4))) minor_axis = Index(list(range(2))) @@ -38,19 +39,24 @@ def test_truncate(): minor_codes = np.array([0, 1, 0, 1, 0, 1]) index = MultiIndex( - levels=[major_axis, minor_axis], codes=[major_codes, minor_codes] + levels=[major_axis, minor_axis], + codes=[major_codes, minor_codes], + names=["L1", "L2"], ) result = index.truncate(before=1) assert "foo" not in result.levels[0] assert 1 in result.levels[0] + assert index.names == result.names result = index.truncate(after=1) assert 2 not in result.levels[0] assert 1 in result.levels[0] + assert index.names == result.names result = index.truncate(before=1, after=2) assert len(result.levels[0]) == 2 + assert index.names == result.names msg = "after < before" with pytest.raises(ValueError, match=msg): diff --git a/pandas/tests/series/methods/test_truncate.py b/pandas/tests/series/methods/test_truncate.py index 47947f0287494..8a2c62cee7e24 100644 --- a/pandas/tests/series/methods/test_truncate.py +++ b/pandas/tests/series/methods/test_truncate.py @@ -126,3 +126,17 @@ def test_truncate_periodindex(self): expected_idx2 = pd.PeriodIndex([pd.Period("2017-09-02")]) tm.assert_series_equal(result2, pd.Series([2], index=expected_idx2)) + + def test_truncate_multiindex(self): + # GH 34564 + mi = pd.MultiIndex.from_product([[1, 2, 3, 4], ["A", "B"]], names=["L1", "L2"]) + s1 = pd.Series(range(mi.shape[0]), index=mi, name="col") + result = s1.truncate(before=2, after=3) + + df = pd.DataFrame.from_dict( + {"L1": [2, 2, 3, 3], "L2": ["A", "B", "A", "B"], "col": [2, 3, 4, 5]} + ) + 
df.set_index(["L1", "L2"], inplace=True) + expected = df.col + + tm.assert_series_equal(result, expected)
- [x] closes #34564 - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry - [x] added tests - [ ] passes tests (fixed for `DataFrame`, still issue for `Series`)
https://api.github.com/repos/pandas-dev/pandas/pulls/34589
2020-06-05T04:23:41Z
2020-06-20T20:04:28Z
2020-06-20T20:04:28Z
2020-06-21T03:24:54Z
REF: implement c_FreqGroup, FreqGroup in tslibs.dtypes
diff --git a/pandas/_libs/tslibs/dtypes.pxd b/pandas/_libs/tslibs/dtypes.pxd index b6373550b1c78..bce071d45c12f 100644 --- a/pandas/_libs/tslibs/dtypes.pxd +++ b/pandas/_libs/tslibs/dtypes.pxd @@ -1,4 +1,21 @@ +cdef enum c_FreqGroup: + # Mirrors FreqGroup in the .pxy file + FR_ANN = 1000 + FR_QTR = 2000 + FR_MTH = 3000 + FR_WK = 4000 + FR_BUS = 5000 + FR_DAY = 6000 + FR_HR = 7000 + FR_MIN = 8000 + FR_SEC = 9000 + FR_MS = 10000 + FR_US = 11000 + FR_NS = 12000 + FR_UND = -10000 # undefined + + cdef enum PeriodDtypeCode: # Annual freqs with various fiscal year ends. # eg, 2005 for A_FEB runs Mar 1, 2004 to Feb 28, 2005 diff --git a/pandas/_libs/tslibs/dtypes.pyx b/pandas/_libs/tslibs/dtypes.pyx index 047f942178179..e38cfe21a65cc 100644 --- a/pandas/_libs/tslibs/dtypes.pyx +++ b/pandas/_libs/tslibs/dtypes.pyx @@ -106,3 +106,20 @@ _period_code_map.update({ "W": 4000, # Weekly "C": 5000, # Custom Business Day }) + + +class FreqGroup: + # Mirrors c_FreqGroup in the .pxd file + FR_ANN = 1000 + FR_QTR = 2000 + FR_MTH = 3000 + FR_WK = 4000 + FR_BUS = 5000 + FR_DAY = 6000 + FR_HR = 7000 + FR_MIN = 8000 + FR_SEC = 9000 + FR_MS = 10000 + FR_US = 11000 + FR_NS = 12000 + FR_UND = -10000 # undefined diff --git a/pandas/_libs/tslibs/frequencies.pyx b/pandas/_libs/tslibs/frequencies.pyx index 8ca442de59f9f..9ff34ef0b6f89 100644 --- a/pandas/_libs/tslibs/frequencies.pyx +++ b/pandas/_libs/tslibs/frequencies.pyx @@ -12,27 +12,12 @@ from pandas._libs.tslibs.offsets import ( opattern, ) -from .dtypes import _period_code_map, _reverse_period_code_map +from .dtypes import FreqGroup, _period_code_map, _reverse_period_code_map # --------------------------------------------------------------------- # Period codes -class FreqGroup: - FR_ANN = 1000 - FR_QTR = 2000 - FR_MTH = 3000 - FR_WK = 4000 - FR_BUS = 5000 - FR_DAY = 6000 - FR_HR = 7000 - FR_MIN = 8000 - FR_SEC = 9000 - FR_MS = 10000 - FR_US = 11000 - FR_NS = 12000 - - # Map attribute-name resolutions to resolution abbreviations 
_attrname_to_abbrevs = { "year": "A", diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 55148041c1718..43a18a7c63f52 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -56,7 +56,22 @@ from pandas._libs.tslibs.ccalendar cimport ( ) from pandas._libs.tslibs.ccalendar cimport c_MONTH_NUMBERS -from pandas._libs.tslibs.dtypes cimport PeriodDtypeBase +from pandas._libs.tslibs.dtypes cimport ( + PeriodDtypeBase, + FR_UND, + FR_ANN, + FR_QTR, + FR_MTH, + FR_WK, + FR_BUS, + FR_DAY, + FR_HR, + FR_MIN, + FR_SEC, + FR_MS, + FR_US, + FR_NS, +) from pandas._libs.tslibs.frequencies cimport ( attrname_to_abbrevs, @@ -98,23 +113,6 @@ ctypedef int64_t (*freq_conv_func)(int64_t, asfreq_info*) nogil cdef extern from *: """ - /*** FREQUENCY CONSTANTS ***/ - // See frequencies.pyx for more detailed variants - - #define FR_ANN 1000 /* Annual */ - #define FR_QTR 2000 /* Quarterly - December year end (default Q) */ - #define FR_MTH 3000 /* Monthly */ - #define FR_WK 4000 /* Weekly */ - #define FR_BUS 5000 /* Business days */ - #define FR_DAY 6000 /* Daily */ - #define FR_HR 7000 /* Hourly */ - #define FR_MIN 8000 /* Minutely */ - #define FR_SEC 9000 /* Secondly */ - #define FR_MS 10000 /* Millisecondly */ - #define FR_US 11000 /* Microsecondly */ - #define FR_NS 12000 /* Nanosecondly */ - #define FR_UND -10000 /* Undefined */ - // must use npy typedef b/c int64_t is aliased in cython-generated c // unclear why we need LL for that row. // see https://github.com/pandas-dev/pandas/pull/34416/ @@ -128,20 +126,6 @@ cdef extern from *: {0, 0, 0, 0, 0, 0, 1}}; """ int64_t daytime_conversion_factor_matrix[7][7] - # TODO: Can we get these frequencies from frequencies.FreqGroup? 
- int FR_ANN - int FR_QTR - int FR_MTH - int FR_WK - int FR_DAY - int FR_HR - int FR_MIN - int FR_SEC - int FR_MS - int FR_US - int FR_NS - int FR_BUS - int FR_UND cdef int max_value(int left, int right) nogil: @@ -1159,30 +1143,29 @@ cdef str period_format(int64_t value, int freq, object fmt=None): if fmt is None: freq_group = get_freq_group(freq) - if freq_group == 1000: # FR_ANN + if freq_group == FR_ANN: fmt = b'%Y' - elif freq_group == 2000: # FR_QTR + elif freq_group == FR_QTR: fmt = b'%FQ%q' - elif freq_group == 3000: # FR_MTH + elif freq_group == FR_MTH: fmt = b'%Y-%m' - elif freq_group == 4000: # WK - left = period_asfreq(value, freq, 6000, 0) - right = period_asfreq(value, freq, 6000, 1) - return f"{period_format(left, 6000)}/{period_format(right, 6000)}" - elif (freq_group == 5000 # BUS - or freq_group == 6000): # DAY + elif freq_group == FR_WK: + left = period_asfreq(value, freq, FR_DAY, 0) + right = period_asfreq(value, freq, FR_DAY, 1) + return f"{period_format(left, FR_DAY)}/{period_format(right, FR_DAY)}" + elif freq_group == FR_BUS or freq_group == FR_DAY: fmt = b'%Y-%m-%d' - elif freq_group == 7000: # HR + elif freq_group == FR_HR: fmt = b'%Y-%m-%d %H:00' - elif freq_group == 8000: # MIN + elif freq_group == FR_MIN: fmt = b'%Y-%m-%d %H:%M' - elif freq_group == 9000: # SEC + elif freq_group == FR_SEC: fmt = b'%Y-%m-%d %H:%M:%S' - elif freq_group == 10000: # MILLISEC + elif freq_group == FR_MS: fmt = b'%Y-%m-%d %H:%M:%S.%l' - elif freq_group == 11000: # MICROSEC + elif freq_group == FR_US: fmt = b'%Y-%m-%d %H:%M:%S.%u' - elif freq_group == 12000: # NANOSEC + elif freq_group == FR_NS: fmt = b'%Y-%m-%d %H:%M:%S.%n' else: raise ValueError(f"Unknown freq: {freq}")
These are already used in libperiod, better to be explicit about it. Coming up: make FreqGroup into an enum with helper methods to convert between FreqGroup/PeriodDtype/DateOffset/Resolution
https://api.github.com/repos/pandas-dev/pandas/pulls/34588
2020-06-05T03:56:42Z
2020-06-08T23:00:25Z
2020-06-08T23:00:25Z
2020-06-08T23:11:06Z
ENH: Resolutions for month/qtr/year
diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index c0baabdc98acd..7453933ddbb4f 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -8,9 +8,10 @@ from pandas._libs.tslibs.util cimport get_nat from pandas._libs.tslibs.np_datetime cimport ( npy_datetimestruct, dt64_to_dtstruct) from pandas._libs.tslibs.frequencies cimport attrname_to_abbrevs +from pandas._libs.tslibs.frequencies import FreqGroup from pandas._libs.tslibs.timezones cimport ( is_utc, is_tzlocal, maybe_get_tz, get_dst_info) -from pandas._libs.tslibs.ccalendar cimport get_days_in_month +from pandas._libs.tslibs.ccalendar cimport get_days_in_month, c_MONTH_NUMBERS from pandas._libs.tslibs.tzconversion cimport tz_convert_utc_to_tzlocal # ---------------------------------------------------------------------- @@ -26,6 +27,9 @@ cdef: int RESO_MIN = 4 int RESO_HR = 5 int RESO_DAY = 6 + int RESO_MTH = 7 + int RESO_QTR = 8 + int RESO_YR = 9 _abbrev_to_attrnames = {v: k for k, v in attrname_to_abbrevs.items()} @@ -37,6 +41,9 @@ _reso_str_map = { RESO_MIN: "minute", RESO_HR: "hour", RESO_DAY: "day", + RESO_MTH: "month", + RESO_QTR: "quarter", + RESO_YR: "year", } _str_reso_map = {v: k for k, v in _reso_str_map.items()} @@ -126,6 +133,9 @@ class Resolution(Enum): RESO_MIN = 4 RESO_HR = 5 RESO_DAY = 6 + RESO_MTH = 7 + RESO_QTR = 8 + RESO_YR = 9 def __lt__(self, other): return self.value < other.value @@ -133,6 +143,32 @@ class Resolution(Enum): def __ge__(self, other): return self.value >= other.value + @property + def freq_group(self): + # TODO: annotate as returning FreqGroup once that is an enum + if self == Resolution.RESO_NS: + return FreqGroup.FR_NS + elif self == Resolution.RESO_US: + return FreqGroup.FR_US + elif self == Resolution.RESO_MS: + return FreqGroup.FR_MS + elif self == Resolution.RESO_SEC: + return FreqGroup.FR_SEC + elif self == Resolution.RESO_MIN: + return FreqGroup.FR_MIN + elif self == Resolution.RESO_HR: + return 
FreqGroup.FR_HR + elif self == Resolution.RESO_DAY: + return FreqGroup.FR_DAY + elif self == Resolution.RESO_MTH: + return FreqGroup.FR_MTH + elif self == Resolution.RESO_QTR: + return FreqGroup.FR_QTR + elif self == Resolution.RESO_YR: + return FreqGroup.FR_ANN + else: + raise ValueError(self) + @property def attrname(self) -> str: """ @@ -175,7 +211,19 @@ class Resolution(Enum): >>> Resolution.get_reso_from_freq('H') == Resolution.RESO_HR True """ - attr_name = _abbrev_to_attrnames[freq] + try: + attr_name = _abbrev_to_attrnames[freq] + except KeyError: + # For quarterly and yearly resolutions, we need to chop off + # a month string. + split_freq = freq.split("-") + if len(split_freq) != 2: + raise + if split_freq[1] not in c_MONTH_NUMBERS: + # i.e. we want e.g. "Q-DEC", not "Q-INVALID" + raise + attr_name = _abbrev_to_attrnames[split_freq[0]] + return cls.from_attrname(attr_name) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index e2ecb6c343b7a..8af23815b54ef 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1122,11 +1122,6 @@ def resolution(self) -> str: """ Returns day, hour, minute, second, millisecond or microsecond """ - if self._resolution_obj is None: - if is_period_dtype(self.dtype): - # somewhere in the past it was decided we default to day - return "day" - # otherwise we fall through and will raise return self._resolution_obj.attrname # type: ignore @classmethod diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 21f4b3f8bb76a..ca6eb45e22c69 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -363,19 +363,23 @@ def _format_attrs(self): # -------------------------------------------------------------------- # Indexing Methods - def _validate_partial_date_slice(self, reso: str): + def _validate_partial_date_slice(self, reso: Resolution): raise NotImplementedError - def 
_parsed_string_to_bounds(self, reso: str, parsed: datetime): + def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime): raise NotImplementedError def _partial_date_slice( - self, reso: str, parsed: datetime, use_lhs: bool = True, use_rhs: bool = True + self, + reso: Resolution, + parsed: datetime, + use_lhs: bool = True, + use_rhs: bool = True, ): """ Parameters ---------- - reso : str + reso : Resolution parsed : datetime use_lhs : bool, default True use_rhs : bool, default True diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index d8654dee56319..2919ef0f878a4 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -7,7 +7,6 @@ from pandas._libs import NaT, Period, Timestamp, index as libindex, lib, tslib from pandas._libs.tslibs import Resolution, fields, parsing, timezones, to_offset -from pandas._libs.tslibs.frequencies import get_freq_group from pandas._libs.tslibs.offsets import prefix_mapping from pandas._typing import DtypeObj, Label from pandas.util._decorators import cache_readonly @@ -470,7 +469,7 @@ def snap(self, freq="S"): dta = DatetimeArray(snapped, dtype=self.dtype) return DatetimeIndex._simple_new(dta, name=self.name) - def _parsed_string_to_bounds(self, reso: str, parsed: datetime): + def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime): """ Calculate datetime bounds for parsed time string and its resolution. 
@@ -485,6 +484,7 @@ def _parsed_string_to_bounds(self, reso: str, parsed: datetime): ------- lower, upper: pd.Timestamp """ + assert isinstance(reso, Resolution), (type(reso), reso) valid_resos = { "year", "month", @@ -497,10 +497,10 @@ def _parsed_string_to_bounds(self, reso: str, parsed: datetime): "second", "microsecond", } - if reso not in valid_resos: + if reso.attrname not in valid_resos: raise KeyError - grp = get_freq_group(reso) + grp = reso.freq_group per = Period(parsed, freq=grp) start, end = per.start_time, per.end_time @@ -521,11 +521,12 @@ def _parsed_string_to_bounds(self, reso: str, parsed: datetime): end = end.tz_localize(self.tz) return start, end - def _validate_partial_date_slice(self, reso: str): + def _validate_partial_date_slice(self, reso: Resolution): + assert isinstance(reso, Resolution), (type(reso), reso) if ( self.is_monotonic - and reso in ["day", "hour", "minute", "second"] - and self._resolution_obj >= Resolution.from_attrname(reso) + and reso.attrname in ["day", "hour", "minute", "second"] + and self._resolution_obj >= reso ): # These resolution/monotonicity validations came from GH3931, # GH3452 and GH2369. 
@@ -625,6 +626,7 @@ def _maybe_cast_slice_bound(self, label, side: str, kind): if isinstance(label, str): freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None)) parsed, reso = parsing.parse_time_string(label, freq) + reso = Resolution.from_attrname(reso) lower, upper = self._parsed_string_to_bounds(reso, parsed) # lower, upper form the half-open interval: # [parsed, parsed + 1 freq) @@ -641,6 +643,7 @@ def _maybe_cast_slice_bound(self, label, side: str, kind): def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True): freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None)) parsed, reso = parsing.parse_time_string(key, freq) + reso = Resolution.from_attrname(reso) loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs, use_rhs=use_rhs) return loc diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 0fafeef078d78..43dfd94b49215 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -5,7 +5,7 @@ from pandas._libs import index as libindex from pandas._libs.lib import no_default -from pandas._libs.tslibs import Period +from pandas._libs.tslibs import Period, Resolution from pandas._libs.tslibs.frequencies import get_freq_group from pandas._libs.tslibs.parsing import DateParseError, parse_time_string from pandas._typing import DtypeObj, Label @@ -501,7 +501,8 @@ def get_loc(self, key, method=None, tolerance=None): # A string with invalid format raise KeyError(f"Cannot interpret '{key}' as period") from err - grp = get_freq_group(reso) + reso = Resolution.from_attrname(reso) + grp = reso.freq_group freqn = get_freq_group(self.freq) # _get_string_slice will handle cases where grp < freqn @@ -558,6 +559,7 @@ def _maybe_cast_slice_bound(self, label, side: str, kind: str): elif isinstance(label, str): try: parsed, reso = parse_time_string(label, self.freq) + reso = Resolution.from_attrname(reso) bounds = self._parsed_string_to_bounds(reso, parsed) return bounds[0 
if side == "left" else 1] except ValueError as err: @@ -569,16 +571,14 @@ def _maybe_cast_slice_bound(self, label, side: str, kind: str): return label - def _parsed_string_to_bounds(self, reso: str, parsed: datetime): - if reso not in ["year", "month", "quarter", "day", "hour", "minute", "second"]: - raise KeyError(reso) - - grp = get_freq_group(reso) + def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime): + grp = reso.freq_group iv = Period(parsed, freq=grp) return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end")) - def _validate_partial_date_slice(self, reso: str): - grp = get_freq_group(reso) + def _validate_partial_date_slice(self, reso: Resolution): + assert isinstance(reso, Resolution), (type(reso), reso) + grp = reso.freq_group freqn = get_freq_group(self.freq) if not grp < freqn: @@ -590,7 +590,7 @@ def _validate_partial_date_slice(self, reso: str): def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True): # TODO: Check for non-True use_lhs/use_rhs parsed, reso = parse_time_string(key, self.freq) - + reso = Resolution.from_attrname(reso) try: return self._partial_date_slice(reso, parsed, use_lhs, use_rhs) except KeyError as err: diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py index fc44226f9d72f..e7dd76584d780 100644 --- a/pandas/tests/indexes/period/test_ops.py +++ b/pandas/tests/indexes/period/test_ops.py @@ -7,24 +7,23 @@ class TestPeriodIndexOps: - def test_resolution(self): - for freq, expected in zip( - ["A", "Q", "M", "D", "H", "T", "S", "L", "U"], - [ - "day", - "day", - "day", - "day", - "hour", - "minute", - "second", - "millisecond", - "microsecond", - ], - ): - - idx = pd.period_range(start="2013-04-01", periods=30, freq=freq) - assert idx.resolution == expected + @pytest.mark.parametrize( + "freq,expected", + [ + ("A", "year"), + ("Q", "quarter"), + ("M", "month"), + ("D", "day"), + ("H", "hour"), + ("T", "minute"), + ("S", "second"), + 
("L", "millisecond"), + ("U", "microsecond"), + ], + ) + def test_resolution(self, freq, expected): + idx = pd.period_range(start="2013-04-01", periods=30, freq=freq) + assert idx.resolution == expected def test_value_counts_unique(self): # GH 7735 diff --git a/pandas/tests/tseries/frequencies/test_freq_code.py b/pandas/tests/tseries/frequencies/test_freq_code.py index 4df221913b805..f0ff449d902d0 100644 --- a/pandas/tests/tseries/frequencies/test_freq_code.py +++ b/pandas/tests/tseries/frequencies/test_freq_code.py @@ -90,6 +90,9 @@ def test_get_to_timestamp_base(freqstr, exp_freqstr): @pytest.mark.parametrize( "freqstr,expected", [ + ("A", "year"), + ("Q", "quarter"), + ("M", "month"), ("D", "day"), ("H", "hour"), ("T", "minute"), @@ -103,13 +106,6 @@ def test_get_attrname_from_abbrev(freqstr, expected): assert Resolution.get_reso_from_freq(freqstr).attrname == expected -@pytest.mark.parametrize("freq", ["A", "Q", "M"]) -def test_get_freq_unsupported_(freq): - # Lowest-frequency resolution is for Day - with pytest.raises(KeyError, match=freq.lower()): - Resolution.get_reso_from_freq(freq) - - @pytest.mark.parametrize("freq", ["D", "H", "T", "S", "L", "U", "N"]) def test_get_freq_roundtrip2(freq): obj = Resolution.get_reso_from_freq(freq)
This does three things 1) Use Resolution objects instead of strings in the places in core.indexes. This makes whats going on much clearer. 2) To make 1) possible, extend Resolution to include month, quarter, and year. This means that `pd.period_range("2017", periods=3, freq="A").resolution` is now "year" instead of "day", which is more accurate. 3) with year/quarter/month added, `Resolution` and `FreqGroup` will be re-labellings of each other, so we can de-duplicate them. They will end up living in tslibs.dtypes.
https://api.github.com/repos/pandas-dev/pandas/pulls/34587
2020-06-05T00:59:20Z
2020-06-09T22:32:55Z
2020-06-09T22:32:55Z
2020-06-09T23:39:59Z
REF: ensure we have offset objects in plotting functions
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py index b8be8a66a59fd..65f030223c7ca 100644 --- a/pandas/plotting/_matplotlib/converter.py +++ b/pandas/plotting/_matplotlib/converter.py @@ -11,7 +11,9 @@ import numpy as np from pandas._libs import lib, tslibs -from pandas._libs.tslibs.frequencies import FreqGroup, get_freq_code, get_freq_group +from pandas._libs.tslibs import to_offset +from pandas._libs.tslibs.frequencies import FreqGroup, get_freq_group +from pandas._libs.tslibs.offsets import BaseOffset from pandas.core.dtypes.common import ( is_datetime64_ns_dtype, @@ -522,34 +524,36 @@ def has_level_label(label_flags, vmin): return True -def _daily_finder(vmin, vmax, freq): +def _daily_finder(vmin, vmax, freq: BaseOffset): + dtype_code = freq._period_dtype_code + periodsperday = -1 - if freq >= FreqGroup.FR_HR: - if freq == FreqGroup.FR_NS: + if dtype_code >= FreqGroup.FR_HR: + if dtype_code == FreqGroup.FR_NS: periodsperday = 24 * 60 * 60 * 1000000000 - elif freq == FreqGroup.FR_US: + elif dtype_code == FreqGroup.FR_US: periodsperday = 24 * 60 * 60 * 1000000 - elif freq == FreqGroup.FR_MS: + elif dtype_code == FreqGroup.FR_MS: periodsperday = 24 * 60 * 60 * 1000 - elif freq == FreqGroup.FR_SEC: + elif dtype_code == FreqGroup.FR_SEC: periodsperday = 24 * 60 * 60 - elif freq == FreqGroup.FR_MIN: + elif dtype_code == FreqGroup.FR_MIN: periodsperday = 24 * 60 - elif freq == FreqGroup.FR_HR: + elif dtype_code == FreqGroup.FR_HR: periodsperday = 24 else: # pragma: no cover - raise ValueError(f"unexpected frequency: {freq}") + raise ValueError(f"unexpected frequency: {dtype_code}") periodsperyear = 365 * periodsperday periodspermonth = 28 * periodsperday - elif freq == FreqGroup.FR_BUS: + elif dtype_code == FreqGroup.FR_BUS: periodsperyear = 261 periodspermonth = 19 - elif freq == FreqGroup.FR_DAY: + elif dtype_code == FreqGroup.FR_DAY: periodsperyear = 365 periodspermonth = 28 - elif get_freq_group(freq) == 
FreqGroup.FR_WK: + elif get_freq_group(dtype_code) == FreqGroup.FR_WK: periodsperyear = 52 periodspermonth = 3 else: # pragma: no cover @@ -676,7 +680,7 @@ def _second_finder(label_interval): elif span <= periodsperyear // 4: month_start = period_break(dates_, "month") info_maj[month_start] = True - if freq < FreqGroup.FR_HR: + if dtype_code < FreqGroup.FR_HR: info["min"] = True else: day_start = period_break(dates_, "day") @@ -884,21 +888,20 @@ def _annual_finder(vmin, vmax, freq): return info -def get_finder(freq): - if isinstance(freq, str): - freq = get_freq_code(freq)[0] - fgroup = get_freq_group(freq) +def get_finder(freq: BaseOffset): + dtype_code = freq._period_dtype_code + fgroup = (dtype_code // 1000) * 1000 if fgroup == FreqGroup.FR_ANN: return _annual_finder elif fgroup == FreqGroup.FR_QTR: return _quarterly_finder - elif freq == FreqGroup.FR_MTH: + elif dtype_code == FreqGroup.FR_MTH: return _monthly_finder - elif (freq >= FreqGroup.FR_BUS) or fgroup == FreqGroup.FR_WK: + elif (dtype_code >= FreqGroup.FR_BUS) or fgroup == FreqGroup.FR_WK: return _daily_finder else: # pragma: no cover - raise NotImplementedError(f"Unsupported frequency: {freq}") + raise NotImplementedError(f"Unsupported frequency: {dtype_code}") class TimeSeries_DateLocator(Locator): @@ -930,8 +933,7 @@ def __init__( day=1, plot_obj=None, ): - if isinstance(freq, str): - freq = get_freq_code(freq)[0] + freq = to_offset(freq) self.freq = freq self.base = base (self.quarter, self.month, self.day) = (quarter, month, day) @@ -1009,8 +1011,7 @@ class TimeSeries_DateFormatter(Formatter): """ def __init__(self, freq, minor_locator=False, dynamic_mode=True, plot_obj=None): - if isinstance(freq, str): - freq = get_freq_code(freq)[0] + freq = to_offset(freq) self.format = None self.freq = freq self.locs = [] diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 738df5244955a..fa129167a744f 100644 --- a/pandas/tests/plotting/test_datetimelike.py 
+++ b/pandas/tests/plotting/test_datetimelike.py @@ -6,6 +6,7 @@ import numpy as np import pytest +from pandas._libs.tslibs import to_offset import pandas.util._test_decorators as td from pandas import DataFrame, Index, NaT, Series, isna @@ -397,12 +398,12 @@ def _test(ax): def test_get_finder(self): import pandas.plotting._matplotlib.converter as conv - assert conv.get_finder("B") == conv._daily_finder - assert conv.get_finder("D") == conv._daily_finder - assert conv.get_finder("M") == conv._monthly_finder - assert conv.get_finder("Q") == conv._quarterly_finder - assert conv.get_finder("A") == conv._annual_finder - assert conv.get_finder("W") == conv._daily_finder + assert conv.get_finder(to_offset("B")) == conv._daily_finder + assert conv.get_finder(to_offset("D")) == conv._daily_finder + assert conv.get_finder(to_offset("M")) == conv._monthly_finder + assert conv.get_finder(to_offset("Q")) == conv._quarterly_finder + assert conv.get_finder(to_offset("A")) == conv._annual_finder + assert conv.get_finder(to_offset("W")) == conv._daily_finder @pytest.mark.slow def test_finder_daily(self):
https://api.github.com/repos/pandas-dev/pandas/pulls/34585
2020-06-04T23:18:01Z
2020-06-08T02:20:33Z
2020-06-08T02:20:33Z
2020-06-08T02:31:24Z
solves ci issues with #34575
diff --git a/asv_bench/benchmarks/arithmetic.py b/asv_bench/benchmarks/arithmetic.py index a9a8a4f3add92..3ef6ab6209ea7 100644 --- a/asv_bench/benchmarks/arithmetic.py +++ b/asv_bench/benchmarks/arithmetic.py @@ -466,7 +466,7 @@ def setup(self, offset): self.rng = rng def time_apply_index(self, offset): - offset.apply_index(self.rng) + self.rng + offset class BinaryOpsMultiIndex:
- [x] closes #34575 - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` solved issues with benchmarks benchmarks running fine now Current right output ``` · Discovering benchmarks · Running 1 total benchmarks (1 commits * 1 environments * 1 benchmarks) [ 0.00%] ·· Benchmarking existing-py_home_moha_venv_pandas-dev_bin_python [ 50.00%] ··· arithmetic.ApplyIndex.time_apply_indexgit ok [ 50.00%] ··· =================================== ========== offset ----------------------------------- ---------- <YearEnd: month=12> 2.05±0ms <YearBegin: month=1> 1.56±0ms <QuarterEnd: startingMonth=3> 4.41±0ms <QuarterBegin: startingMonth=3> 2.50±0ms <MonthEnd> 4.32±0ms <MonthBegin> 2.31±0ms <DateOffset: days=2, months=2> 5.67±0ms <BusinessDay> 10.4±0ms <SemiMonthEnd: day_of_month=15> 14.5±0ms <SemiMonthBegin: day_of_month=15> 14.2±0ms =================================== ========== ```
https://api.github.com/repos/pandas-dev/pandas/pulls/34579
2020-06-04T18:04:59Z
2020-06-04T19:04:33Z
2020-06-04T19:04:33Z
2020-06-04T19:04:33Z
DOC: fixed PR06 in pandas.Timedeltas
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index eb04049f18e0c..a239804ea7bc2 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1093,7 +1093,7 @@ class Timedelta(_Timedelta): Parameters ---------- - value : Timedelta, timedelta, np.timedelta64, string, or integer + value : Timedelta, timedelta, np.timedelta64, str, or int unit : str, default 'ns' Denote the unit of the input, if input is an integer.
https://api.github.com/repos/pandas-dev/pandas/pulls/34574
2020-06-04T15:25:37Z
2020-06-04T19:06:40Z
2020-06-04T19:06:40Z
2020-06-04T19:06:45Z
TST: Added test to check that the freqstr attribute of the index is p…
diff --git a/pandas/tests/series/methods/test_shift.py b/pandas/tests/series/methods/test_shift.py index 686e66162fe0b..f981e98100d31 100644 --- a/pandas/tests/series/methods/test_shift.py +++ b/pandas/tests/series/methods/test_shift.py @@ -276,3 +276,19 @@ def test_shift_dt64values_int_fill_deprecated(self): expected = pd.Series([pd.Timestamp(0), ser[0]]) tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("periods", [1, 2, 3, 4]) + def test_shift_preserve_freqstr(self, periods): + # GH#21275 + ser = pd.Series( + range(periods), + index=pd.date_range("2016-1-1 00:00:00", periods=periods, freq="H"), + ) + + result = ser.shift(1, "2H") + + expected = pd.Series( + range(periods), + index=pd.date_range("2016-1-1 02:00:00", periods=periods, freq="H"), + ) + tm.assert_series_equal(result, expected)
…reserved after a shift operation. Run black and flake8. - [x] closes #21275 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry (omitted, test only)
https://api.github.com/repos/pandas-dev/pandas/pulls/34572
2020-06-04T13:18:07Z
2020-06-04T19:09:56Z
2020-06-04T19:09:55Z
2020-06-04T19:10:00Z
Export InvalidIndexError
diff --git a/doc/source/reference/general_utility_functions.rst b/doc/source/reference/general_utility_functions.rst index 993107dc09756..72a84217323ab 100644 --- a/doc/source/reference/general_utility_functions.rst +++ b/doc/source/reference/general_utility_functions.rst @@ -38,10 +38,11 @@ Exceptions and warnings errors.AccessorRegistrationWarning errors.DtypeWarning errors.EmptyDataError - errors.OutOfBoundsDatetime + errors.InvalidIndexError errors.MergeError errors.NullFrequencyError errors.NumbaUtilError + errors.OutOfBoundsDatetime errors.ParserError errors.ParserWarning errors.PerformanceWarning diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index e680c2db55a43..87d690ad5fc59 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -382,7 +382,7 @@ Other API changes - ``loc`` lookups with an object-dtype :class:`Index` and an integer key will now raise ``KeyError`` instead of ``TypeError`` when key is missing (:issue:`31905`) - Using a :func:`pandas.api.indexers.BaseIndexer` with ``count``, ``min``, ``max``, ``median``, ``skew``, ``cov``, ``corr`` will now return correct results for any monotonic :func:`pandas.api.indexers.BaseIndexer` descendant (:issue:`32865`) - Added a :func:`pandas.api.indexers.FixedForwardWindowIndexer` class to support forward-looking windows during ``rolling`` operations. -- +- Added :class:`pandas.errors.InvalidIndexError` (:issue:`34570`). 
Backwards incompatible API changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index c340460857b9f..dfe20654b4988 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -47,7 +47,7 @@ from pandas.compat import set_function_name from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv -from pandas.errors import AbstractMethodError +from pandas.errors import AbstractMethodError, InvalidIndexError from pandas.util._decorators import ( Appender, Substitution, @@ -90,13 +90,7 @@ from pandas.core.base import PandasObject, SelectionMixin import pandas.core.common as com from pandas.core.construction import create_series_with_explicit_dtype -from pandas.core.indexes.api import ( - Index, - InvalidIndexError, - MultiIndex, - RangeIndex, - ensure_index, -) +from pandas.core.indexes.api import Index, MultiIndex, RangeIndex, ensure_index from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.period import Period, PeriodIndex import pandas.core.indexing as indexing diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 39892d87bfd69..67003dffb90bb 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -8,6 +8,7 @@ import numpy as np from pandas._typing import FrameOrSeries +from pandas.errors import InvalidIndexError from pandas.util._decorators import cache_readonly from pandas.core.dtypes.common import ( @@ -26,7 +27,6 @@ from pandas.core.groupby import ops from pandas.core.groupby.categorical import recode_for_groupby, recode_from_groupby from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex -from pandas.core.indexes.base import InvalidIndexError from pandas.core.series import Series from pandas.io.formats.printing import pprint_thing diff --git a/pandas/core/index.py b/pandas/core/index.py index 8cff53d7a8b74..a315b9619b0e7 100644 --- a/pandas/core/index.py 
+++ b/pandas/core/index.py @@ -7,7 +7,6 @@ Index, Int64Index, IntervalIndex, - InvalidIndexError, MultiIndex, NaT, NumericIndex, diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index fcce82e7a69db..4c5a70f4088ee 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -2,11 +2,11 @@ from typing import List, Set from pandas._libs import NaT, lib +from pandas.errors import InvalidIndexError import pandas.core.common as com from pandas.core.indexes.base import ( Index, - InvalidIndexError, _new_Index, ensure_index, ensure_index_from_sequences, diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 4a99d2dfe339a..c046d6465ce67 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -16,6 +16,7 @@ from pandas._typing import DtypeObj, Label from pandas.compat import set_function_name from pandas.compat.numpy import function as nv +from pandas.errors import InvalidIndexError from pandas.util._decorators import Appender, Substitution, cache_readonly, doc from pandas.core.dtypes import concat as _concat @@ -153,10 +154,6 @@ def index_arithmetic_method(self, other): return set_function_name(index_arithmetic_method, name, cls) -class InvalidIndexError(Exception): - pass - - _o_dtype = np.dtype(object) _Identity = object diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 6bcfb3bccf5c7..f3c96db0a8d6e 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -9,6 +9,7 @@ from pandas._libs.tslibs import Resolution, fields, parsing, timezones, to_offset from pandas._libs.tslibs.offsets import prefix_mapping from pandas._typing import DtypeObj, Label +from pandas.errors import InvalidIndexError from pandas.util._decorators import cache_readonly, doc from pandas.core.dtypes.common import ( @@ -24,7 +25,7 @@ from pandas.core.arrays.datetimes import DatetimeArray, tz_to_dtype import pandas.core.common as com -from 
pandas.core.indexes.base import Index, InvalidIndexError, maybe_extract_name +from pandas.core.indexes.base import Index, maybe_extract_name from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin from pandas.core.indexes.extension import inherit_names from pandas.core.tools.times import to_time diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 1a59e066879cc..3be2bcd4888cb 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -11,6 +11,7 @@ from pandas._libs.interval import Interval, IntervalMixin, IntervalTree from pandas._libs.tslibs import Timedelta, Timestamp, to_offset from pandas._typing import AnyArrayLike, Label +from pandas.errors import InvalidIndexError from pandas.util._decorators import Appender, Substitution, cache_readonly from pandas.util._exceptions import rewrite_exception @@ -44,7 +45,6 @@ import pandas.core.indexes.base as ibase from pandas.core.indexes.base import ( Index, - InvalidIndexError, _index_shared_docs, default_pprint, ensure_index, diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index fc2d4cf4621c4..a09e5a657f9fb 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -20,7 +20,7 @@ from pandas._libs.hashtable import duplicated_int64 from pandas._typing import AnyArrayLike, Scalar from pandas.compat.numpy import function as nv -from pandas.errors import PerformanceWarning, UnsortedIndexError +from pandas.errors import InvalidIndexError, PerformanceWarning, UnsortedIndexError from pandas.util._decorators import Appender, cache_readonly, doc from pandas.core.dtypes.cast import coerce_indexer_dtype @@ -45,12 +45,7 @@ from pandas.core.arrays.categorical import factorize_from_iterables import pandas.core.common as com import pandas.core.indexes.base as ibase -from pandas.core.indexes.base import ( - Index, - InvalidIndexError, - _index_shared_docs, - ensure_index, -) +from pandas.core.indexes.base import Index, 
_index_shared_docs, ensure_index from pandas.core.indexes.frozen import FrozenList from pandas.core.indexes.numeric import Int64Index import pandas.core.missing as missing diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 31783f6dbaaf7..68c2b44b23964 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -8,6 +8,7 @@ from pandas._libs.tslibs import Period, Resolution from pandas._libs.tslibs.parsing import DateParseError, parse_time_string from pandas._typing import DtypeObj, Label +from pandas.errors import InvalidIndexError from pandas.util._decorators import Appender, cache_readonly, doc from pandas.core.dtypes.common import ( @@ -32,7 +33,6 @@ import pandas.core.common as com import pandas.core.indexes.base as ibase from pandas.core.indexes.base import ( - InvalidIndexError, _index_shared_docs, ensure_index, maybe_extract_name, diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index ce3ff17814a25..a14994866c0f7 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -3,6 +3,7 @@ from pandas._libs import index as libindex, lib from pandas._libs.tslibs import Timedelta, to_offset from pandas._typing import DtypeObj, Label +from pandas.errors import InvalidIndexError from pandas.util._decorators import doc from pandas.core.dtypes.common import ( @@ -18,7 +19,7 @@ from pandas.core.arrays import datetimelike as dtl from pandas.core.arrays.timedeltas import TimedeltaArray import pandas.core.common as com -from pandas.core.indexes.base import Index, InvalidIndexError, maybe_extract_name +from pandas.core.indexes.base import Index, maybe_extract_name from pandas.core.indexes.datetimelike import ( DatetimeIndexOpsMixin, DatetimeTimedeltaMixin, diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 326bd00270eca..9c8b01003bece 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -4,7 +4,7 @@ from pandas._libs.indexing 
import _NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim -from pandas.errors import AbstractMethodError +from pandas.errors import AbstractMethodError, InvalidIndexError from pandas.util._decorators import doc from pandas.core.dtypes.common import ( @@ -29,7 +29,7 @@ is_list_like_indexer, length_of_indexer, ) -from pandas.core.indexes.api import Index, InvalidIndexError +from pandas.core.indexes.api import Index if TYPE_CHECKING: from pandas import DataFrame # noqa:F401 diff --git a/pandas/core/series.py b/pandas/core/series.py index b32a4c36a8247..a27e44efe1a97 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -34,6 +34,7 @@ ValueKeyFunc, ) from pandas.compat.numpy import function as nv +from pandas.errors import InvalidIndexError from pandas.util._decorators import Appender, Substitution, doc from pandas.util._validators import validate_bool_kwarg, validate_percentile @@ -79,13 +80,7 @@ from pandas.core.generic import NDFrame from pandas.core.indexers import unpack_1tuple from pandas.core.indexes.accessors import CombinedDatetimelikeProperties -from pandas.core.indexes.api import ( - Float64Index, - Index, - InvalidIndexError, - MultiIndex, - ensure_index, -) +from pandas.core.indexes.api import Float64Index, Index, MultiIndex, ensure_index import pandas.core.indexes.base as ibase from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.period import PeriodIndex diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py index 4c4ce9df85543..e3427d93f3d84 100644 --- a/pandas/errors/__init__.py +++ b/pandas/errors/__init__.py @@ -200,3 +200,11 @@ class NumbaUtilError(Exception): """ Error raised for unsupported Numba engine routines. """ + + +class InvalidIndexError(Exception): + """ + Exception raised when attemping to use an invalid index key. + + .. 
versionadded:: 1.1.0 + """ diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 0f9509c372bdf..37ff97f028e81 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -5,6 +5,7 @@ import pytest from pandas._libs import iNaT +from pandas.errors import InvalidIndexError from pandas.core.dtypes.common import is_datetime64tz_dtype from pandas.core.dtypes.dtypes import CategoricalDtype @@ -25,7 +26,6 @@ isna, ) import pandas._testing as tm -from pandas.core.indexes.base import InvalidIndexError from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index f08472fe72631..b1faaa2115f55 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -3,10 +3,11 @@ import numpy as np import pytest +from pandas.errors import InvalidIndexError + import pandas as pd from pandas import DatetimeIndex, Index, Timestamp, date_range, notna import pandas._testing as tm -from pandas.core.indexes.base import InvalidIndexError from pandas.tseries.offsets import BDay, CDay diff --git a/pandas/tests/indexes/interval/test_indexing.py b/pandas/tests/indexes/interval/test_indexing.py index 718136fca6c80..3abc6e348748a 100644 --- a/pandas/tests/indexes/interval/test_indexing.py +++ b/pandas/tests/indexes/interval/test_indexing.py @@ -3,6 +3,8 @@ import numpy as np import pytest +from pandas.errors import InvalidIndexError + from pandas import ( CategoricalIndex, Interval, @@ -12,7 +14,6 @@ timedelta_range, ) import pandas._testing as tm -from pandas.core.indexes.base import InvalidIndexError class TestGetLoc: diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 997887cc18d61..2755b186f3eae 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ 
-4,6 +4,8 @@ import numpy as np import pytest +from pandas.errors import InvalidIndexError + import pandas as pd from pandas import ( Index, @@ -19,7 +21,6 @@ ) import pandas._testing as tm import pandas.core.common as com -from pandas.core.indexes.base import InvalidIndexError @pytest.fixture(scope="class", params=[None, "foo"]) diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py index 4cc67986ad065..03ae2ae6a1f85 100644 --- a/pandas/tests/indexes/multi/test_indexing.py +++ b/pandas/tests/indexes/multi/test_indexing.py @@ -3,10 +3,11 @@ import numpy as np import pytest +from pandas.errors import InvalidIndexError + import pandas as pd from pandas import Categorical, Index, MultiIndex, date_range import pandas._testing as tm -from pandas.core.indexes.base import InvalidIndexError class TestSliceLocs: diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index 12454c20d2bb4..b61d1d903f89a 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -5,6 +5,7 @@ import pytest from pandas._libs.tslibs import period as libperiod +from pandas.errors import InvalidIndexError import pandas as pd from pandas import ( @@ -19,7 +20,6 @@ period_range, ) import pandas._testing as tm -from pandas.core.indexes.base import InvalidIndexError class TestGetItem: diff --git a/pandas/tests/resample/test_period_index.py b/pandas/tests/resample/test_period_index.py index 3db9a91118ebc..fe02eaef8ba82 100644 --- a/pandas/tests/resample/test_period_index.py +++ b/pandas/tests/resample/test_period_index.py @@ -7,11 +7,11 @@ from pandas._libs.tslibs.ccalendar import DAYS, MONTHS from pandas._libs.tslibs.period import IncompatibleFrequency +from pandas.errors import InvalidIndexError import pandas as pd from pandas import DataFrame, Series, Timestamp import pandas._testing as tm -from pandas.core.indexes.base import InvalidIndexError from 
pandas.core.indexes.datetimes import date_range from pandas.core.indexes.period import Period, PeriodIndex, period_range from pandas.core.resample import _get_period_range_edges
- [x] closes #xxxx (there is no such an issue) - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34570
2020-06-04T11:52:41Z
2020-06-14T20:37:30Z
2020-06-14T20:37:29Z
2020-06-14T20:37:30Z
CLN: address FIXMEs in liboffsets
diff --git a/pandas/_libs/tslibs/offsets.pxd b/pandas/_libs/tslibs/offsets.pxd index 2b8ad97b83917..9a9244db4a565 100644 --- a/pandas/_libs/tslibs/offsets.pxd +++ b/pandas/_libs/tslibs/offsets.pxd @@ -5,7 +5,7 @@ cdef bint is_offset_object(object obj) cdef bint is_tick_object(object obj) cdef class BaseOffset: - cdef readonly: - int64_t n - bint normalize - dict _cache + cdef readonly: + int64_t n + bint normalize + dict _cache diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 77b60d0c22322..e8f4208574dbe 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -1292,11 +1292,7 @@ cdef class BusinessDay(BusinessMixin): self._offset = state.pop("_offset") elif "offset" in state: self._offset = state.pop("offset") - - @property - def _params(self): - # FIXME: using cache_readonly breaks a pytables test - return BaseOffset._params.func(self) + self._cache = state.pop("_cache", {}) def _offset_str(self) -> str: def get_str(td): @@ -1383,8 +1379,6 @@ cdef class BusinessDay(BusinessMixin): if self.n > 0: shifted = (dtindex.to_perioddelta("B") - time).asi8 != 0 - # Integer-array addition is deprecated, so we use - # _time_shift directly roll = np.where(shifted, self.n - 1, self.n) shifted = asper._addsub_int_array(roll, operator.add) else: @@ -2465,12 +2459,7 @@ cdef class Week(SingleConstructorOffset): self.n = state.pop("n") self.normalize = state.pop("normalize") self.weekday = state.pop("weekday") - - @property - def _params(self): - # TODO: making this into a property shouldn't be necessary, but otherwise - # we unpickle legacy objects incorrectly - return BaseOffset._params.func(self) + self._cache = state.pop("_cache", {}) def is_anchored(self) -> bool: return self.n == 1 and self.weekday is not None @@ -2520,7 +2509,7 @@ cdef class Week(SingleConstructorOffset): from .frequencies import get_freq_code # TODO: avoid circular import i8other = dtindex.asi8 - off = (i8other % DAY_NANOS).view("timedelta64") 
+ off = (i8other % DAY_NANOS).view("timedelta64[ns]") base, mult = get_freq_code(self.freqstr) base_period = dtindex.to_period(base)
https://api.github.com/repos/pandas-dev/pandas/pulls/34566
2020-06-04T04:12:53Z
2020-06-04T19:07:36Z
2020-06-04T19:07:36Z
2020-06-04T19:38:38Z
CLN: circular/runtime imports in tslibs
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 77b60d0c22322..f37705b255298 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -31,8 +31,6 @@ from pandas._libs.tslibs.util cimport ( is_float_object, ) -from pandas._libs.tslibs.base cimport ABCTimestamp - from pandas._libs.tslibs.ccalendar import ( MONTH_ALIASES, MONTH_TO_CAL_NUM, weekday_to_int, int_to_weekday, ) @@ -49,7 +47,9 @@ from pandas._libs.tslibs.timezones cimport utc_pytz as UTC from pandas._libs.tslibs.tzconversion cimport tz_convert_single from .timedeltas cimport delta_to_nanoseconds - +from .timedeltas import Timedelta +from .timestamps cimport _Timestamp +from .timestamps import Timestamp # --------------------------------------------------------------------- # Misc Helpers @@ -63,7 +63,7 @@ cdef bint is_tick_object(object obj): cdef datetime _as_datetime(datetime obj): - if isinstance(obj, ABCTimestamp): + if isinstance(obj, _Timestamp): return obj.to_pydatetime() return obj @@ -72,7 +72,7 @@ cdef bint _is_normalized(datetime dt): if dt.hour != 0 or dt.minute != 0 or dt.second != 0 or dt.microsecond != 0: # Regardless of whether dt is datetime vs Timestamp return False - if isinstance(dt, ABCTimestamp): + if isinstance(dt, _Timestamp): return dt.nanosecond == 0 return True @@ -107,7 +107,6 @@ def apply_wraps(func): # not play nicely with cython class methods def wrapper(self, other): - from pandas import Timestamp if other is NaT: return NaT @@ -584,7 +583,6 @@ cdef class BaseOffset: TimeStamp Rolled timestamp if not on offset, otherwise unchanged timestamp. """ - from pandas import Timestamp dt = Timestamp(dt) if not self.is_on_offset(dt): dt = dt - type(self)(1, normalize=self.normalize, **self.kwds) @@ -599,7 +597,6 @@ cdef class BaseOffset: TimeStamp Rolled timestamp if not on offset, otherwise unchanged timestamp. 
""" - from pandas import Timestamp dt = Timestamp(dt) if not self.is_on_offset(dt): dt = dt + type(self)(1, normalize=self.normalize, **self.kwds) @@ -766,7 +763,6 @@ cdef class Tick(SingleConstructorOffset): @property def delta(self): - from .timedeltas import Timedelta return self.n * Timedelta(self._nanos_inc) @property @@ -853,7 +849,7 @@ cdef class Tick(SingleConstructorOffset): def apply(self, other): # Timestamp can handle tz and nano sec, thus no need to use apply_wraps - if isinstance(other, ABCTimestamp): + if isinstance(other, _Timestamp): # GH#15126 # in order to avoid a recursive @@ -868,7 +864,6 @@ cdef class Tick(SingleConstructorOffset): return NaT elif is_datetime64_object(other) or PyDate_Check(other): # PyDate_Check includes date, datetime - from pandas import Timestamp return Timestamp(other) + self if PyDelta_Check(other): @@ -1020,7 +1015,6 @@ cdef class RelativeDeltaOffset(BaseOffset): # bring tz back from UTC calculation other = localize_pydatetime(other, tzinfo) - from .timestamps import Timestamp return Timestamp(other) else: return other + timedelta(self.n) @@ -1069,7 +1063,6 @@ cdef class RelativeDeltaOffset(BaseOffset): if k in ["days", "hours", "minutes", "seconds", "microseconds"] } if timedelta_kwds: - from .timedeltas import Timedelta delta = Timedelta(**timedelta_kwds) index = index + (self.n * delta) return index @@ -2265,7 +2258,6 @@ cdef class SemiMonthOffset(SingleConstructorOffset): @apply_index_wraps def apply_index(self, dtindex): # determine how many days away from the 1st of the month we are - from pandas import Timedelta dti = dtindex i8other = dtindex.asi8 @@ -2368,8 +2360,6 @@ cdef class SemiMonthEnd(SemiMonthOffset): ------- result : DatetimeIndex """ - from pandas import Timedelta - nanos = (roll % 2) * Timedelta(days=self.day_of_month).value dtindex += nanos.astype("timedelta64[ns]") return dtindex + Timedelta(days=-1) @@ -2427,7 +2417,6 @@ cdef class SemiMonthBegin(SemiMonthOffset): ------- result : DatetimeIndex 
""" - from pandas import Timedelta nanos = (roll % 2) * Timedelta(days=self.day_of_month - 1).value return dtindex + nanos.astype("timedelta64[ns]") @@ -2516,7 +2505,6 @@ cdef class Week(SingleConstructorOffset): ------- result : DatetimeIndex """ - from pandas import Timedelta from .frequencies import get_freq_code # TODO: avoid circular import i8other = dtindex.asi8 @@ -2818,8 +2806,6 @@ cdef class FY5253(FY5253Mixin): @apply_wraps def apply(self, other): - from pandas import Timestamp - norm = Timestamp(other).normalize() n = self.n @@ -3040,8 +3026,6 @@ cdef class FY5253Quarter(FY5253Mixin): num_qtrs : int tdelta : Timedelta """ - from pandas import Timestamp, Timedelta - num_qtrs = 0 norm = Timestamp(other).tz_localize(None) @@ -3072,7 +3056,6 @@ cdef class FY5253Quarter(FY5253Mixin): @apply_wraps def apply(self, other): # Note: self.n == 0 is not allowed. - from pandas import Timedelta n = self.n @@ -3112,8 +3095,6 @@ cdef class FY5253Quarter(FY5253Mixin): def year_has_extra_week(self, dt: datetime) -> bool: # Avoid round-down errors --> normalize to get # e.g. 
'370D' instead of '360D23H' - from pandas import Timestamp - norm = Timestamp(dt).normalize().tz_localize(None) next_year_end = self._offset.rollforward(norm) @@ -3592,9 +3573,6 @@ cpdef to_offset(freq): >>> to_offset(Hour()) <Hour> """ - # TODO: avoid runtime imports - from pandas._libs.tslibs.timedeltas import Timedelta - if freq is None: return None diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index a90d06fa53997..02fe203637d62 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -20,7 +20,6 @@ from pandas._libs.tslibs.ccalendar cimport DAY_NANOS, HOUR_NANOS from pandas._libs.tslibs.nattype cimport NPY_NAT from pandas._libs.tslibs.np_datetime cimport ( npy_datetimestruct, dt64_to_dtstruct) -from pandas._libs.tslibs.timedeltas cimport delta_to_nanoseconds from pandas._libs.tslibs.timezones cimport ( get_dst_info, is_tzlocal, is_utc, get_timezone, get_utcoffset) @@ -123,6 +122,7 @@ timedelta-like} elif nonexistent == 'shift_backward': shift_backward = True elif PyDelta_Check(nonexistent): + from .timedeltas import delta_to_nanoseconds shift_delta = delta_to_nanoseconds(nonexistent) elif nonexistent not in ('raise', None): msg = ("nonexistent must be one of {'NaT', 'raise', 'shift_forward', "
ATM tzconversion depends on timedeltas (for delta_to_nanoseconds), which throws a wrench in the erstwhile dependency hierarchy. By making the one usage of delta_to_nanoseconds a runtime import, we make it possible to remove a bunch of other runtime imports.
https://api.github.com/repos/pandas-dev/pandas/pulls/34563
2020-06-04T00:31:13Z
2020-06-04T17:42:14Z
2020-06-04T17:42:14Z
2020-06-04T18:15:40Z
DOC: Fixed PR06 (wrong parameter type) in pandas.Timestamp
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 6dc49914ef4b7..f079c5157eeb3 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -397,7 +397,7 @@ class NaTType(_NaT): Parameters ---------- - locale : string, default None (English locale) + locale : str, default None (English locale) Locale determining the language in which to return the month name. Returns @@ -414,7 +414,7 @@ class NaTType(_NaT): Parameters ---------- - locale : string, default None (English locale) + locale : str, default None (English locale) Locale determining the language in which to return the day name. Returns diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index fad87f9f910cb..471ed557f4327 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -1053,7 +1053,7 @@ timedelta}, default 'raise' Parameters ---------- - locale : string, default None (English locale) + locale : str, default None (English locale) Locale determining the language in which to return the day name. Returns @@ -1070,7 +1070,7 @@ timedelta}, default 'raise' Parameters ---------- - locale : string, default None (English locale) + locale : str, default None (English locale) Locale determining the language in which to return the month name. Returns
https://api.github.com/repos/pandas-dev/pandas/pulls/34561
2020-06-03T22:18:19Z
2020-06-04T07:04:57Z
2020-06-04T07:04:57Z
2020-06-04T07:04:57Z
BUG: fix resolution_string docstring
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 10c1a56a2eb4e..de02d45337b38 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -907,19 +907,19 @@ cdef class _Timedelta(ABCTimedelta): Examples -------- >>> td = pd.Timedelta('1 days 2 min 3 us 42 ns') - >>> td.resolution + >>> td.resolution_string 'N' >>> td = pd.Timedelta('1 days 2 min 3 us') - >>> td.resolution + >>> td.resolution_string 'U' >>> td = pd.Timedelta('2 min 3 s') - >>> td.resolution + >>> td.resolution_string 'S' >>> td = pd.Timedelta(36, unit='us') - >>> td.resolution + >>> td.resolution_string 'U' """ self._ensure_components()
broken off from #34499
https://api.github.com/repos/pandas-dev/pandas/pulls/34560
2020-06-03T21:15:55Z
2020-06-03T22:28:48Z
2020-06-03T22:28:48Z
2020-06-03T22:32:13Z
TYP: fix incorrect mypy error in reshape.py
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 7edd5af7c6870..d043e3ad53f9a 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -39,6 +39,7 @@ Index, MultiIndex, ) +from pandas.core.indexes.frozen import FrozenList from pandas.core.series import Series from pandas.core.sorting import ( compress_group_index, @@ -316,15 +317,16 @@ def get_new_columns(self, value_columns: Index | None): stride = len(self.removed_level) + self.lift width = len(value_columns) propagator = np.repeat(np.arange(width), stride) + + new_levels: FrozenList | list[Index] + if isinstance(value_columns, MultiIndex): new_levels = value_columns.levels + (self.removed_level_full,) new_names = value_columns.names + (self.removed_name,) new_codes = [lab.take(propagator) for lab in value_columns.codes] else: - # error: Incompatible types in assignment (expression has type "List[Any]", - # variable has type "FrozenList") - new_levels = [ # type: ignore[assignment] + new_levels = [ value_columns, self.removed_level_full, ]
xref #37715 ````new_levels```` is not defined as a ````FrozenList```` within ````get_new_colums````
https://api.github.com/repos/pandas-dev/pandas/pulls/45127
2021-12-30T15:00:39Z
2021-12-31T15:00:19Z
2021-12-31T15:00:19Z
2021-12-31T15:15:16Z
BUG: Operations with SparseArray return SA with wrong indices
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 538d4e7e4a7aa..2a21498a5d1e4 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -1680,13 +1680,14 @@ def _cmp_method(self, other, op) -> SparseArray: op_name = op.__name__.strip("_") return _sparse_array_op(self, other, op, op_name) else: + # scalar with np.errstate(all="ignore"): fill_value = op(self.fill_value, other) - result = op(self.sp_values, other) + result = np.full(len(self), fill_value, dtype=np.bool_) + result[self.sp_index.indices] = op(self.sp_values, other) return type(self)( result, - sparse_index=self.sp_index, fill_value=fill_value, dtype=np.bool_, ) diff --git a/pandas/tests/arrays/sparse/test_arithmetics.py b/pandas/tests/arrays/sparse/test_arithmetics.py index 012fe61fdba05..3db1ee9faad78 100644 --- a/pandas/tests/arrays/sparse/test_arithmetics.py +++ b/pandas/tests/arrays/sparse/test_arithmetics.py @@ -32,6 +32,7 @@ class TestSparseArrayArithmetics: _klass = SparseArray def _assert(self, a, b): + # We have to use tm.assert_sp_array_equal. 
See GH #45126 tm.assert_numpy_array_equal(a, b) def _check_numeric_ops(self, a, b, a_dense, b_dense, mix: bool, op): diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py index 2c3dcdeeaf8dc..0ebe03d9a1198 100644 --- a/pandas/tests/arrays/sparse/test_array.py +++ b/pandas/tests/arrays/sparse/test_array.py @@ -248,8 +248,8 @@ def test_scalar_with_index_infer_dtype(self, scalar, dtype): assert arr.dtype == dtype assert exp.dtype == dtype - # GH 23122 def test_getitem_bool_sparse_array(self): + # GH 23122 spar_bool = SparseArray([False, True] * 5, dtype=np.bool8, fill_value=True) exp = SparseArray([np.nan, 2, np.nan, 5, 6]) tm.assert_sp_array_equal(self.arr[spar_bool], exp) @@ -266,6 +266,13 @@ def test_getitem_bool_sparse_array(self): exp = SparseArray([np.nan, 3, 5]) tm.assert_sp_array_equal(res, exp) + def test_getitem_bool_sparse_array_as_comparison(self): + # GH 45110 + arr = SparseArray([1, 2, 3, 4, np.nan, np.nan], fill_value=np.nan) + res = arr[arr > 2] + exp = SparseArray([3.0, 4.0], fill_value=np.nan) + tm.assert_sp_array_equal(res, exp) + def test_get_item(self): assert np.isnan(self.arr[1]) diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index f7809dc2e4217..5611889a9368c 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -100,6 +100,11 @@ def data_for_grouping(request): return SparseArray([1, 1, np.nan, np.nan, 2, 2, 1, 3], fill_value=request.param) +@pytest.fixture(params=[0, np.nan]) +def data_for_compare(request): + return SparseArray([0, 0, np.nan, -2, -1, 4, 2, 3, 0, 0], fill_value=request.param) + + class BaseSparseTests: def _check_unsupported(self, data): if data.dtype == SparseDtype(int, 0): @@ -432,32 +437,45 @@ def _check_divmod_op(self, ser, op, other, exc=NotImplementedError): super()._check_divmod_op(ser, op, other, exc=None) -class TestComparisonOps(BaseSparseTests, base.BaseComparisonOpsTests): - def 
_compare_other(self, s, data, comparison_op, other): +class TestComparisonOps(BaseSparseTests): + def _compare_other(self, data_for_compare: SparseArray, comparison_op, other): op = comparison_op - # array - result = pd.Series(op(data, other)) - # hard to test the fill value, since we don't know what expected - # is in general. - # Rely on tests in `tests/sparse` to validate that. - assert isinstance(result.dtype, SparseDtype) - assert result.dtype.subtype == np.dtype("bool") - - with np.errstate(all="ignore"): - expected = pd.Series( - SparseArray( - op(np.asarray(data), np.asarray(other)), - fill_value=result.values.fill_value, - ) + result = op(data_for_compare, other) + assert isinstance(result, SparseArray) + assert result.dtype.subtype == np.bool_ + + if isinstance(other, SparseArray): + fill_value = op(data_for_compare.fill_value, other.fill_value) + else: + fill_value = np.all( + op(np.asarray(data_for_compare.fill_value), np.asarray(other)) ) - tm.assert_series_equal(result, expected) + expected = SparseArray( + op(data_for_compare.to_dense(), np.asarray(other)), + fill_value=fill_value, + dtype=np.bool_, + ) + tm.assert_sp_array_equal(result, expected) - # series - ser = pd.Series(data) - result = op(ser, other) - tm.assert_series_equal(result, expected) + def test_scalar(self, data_for_compare: SparseArray, comparison_op): + self._compare_other(data_for_compare, comparison_op, 0) + self._compare_other(data_for_compare, comparison_op, 1) + self._compare_other(data_for_compare, comparison_op, -1) + self._compare_other(data_for_compare, comparison_op, np.nan) + + @pytest.mark.xfail(reason="Wrong indices") + def test_array(self, data_for_compare: SparseArray, comparison_op): + arr = np.linspace(-4, 5, 10) + self._compare_other(data_for_compare, comparison_op, arr) + + @pytest.mark.xfail(reason="Wrong indices") + def test_sparse_array(self, data_for_compare: SparseArray, comparison_op): + arr = data_for_compare + 1 + self._compare_other(data_for_compare, 
comparison_op, arr) + arr = data_for_compare * 2 + self._compare_other(data_for_compare, comparison_op, arr) class TestPrinting(BaseSparseTests, base.BasePrintingTests):
- [ ] closes #44956, #45110 - [ ] tests added / passed Two tests have failed but looks like the reason is not my changes (Perhaps won't see in checks) <details> ====================================================================== FAILURES ======================================================================= __________________________________________ TestChaining.test_detect_chained_assignment_warning_stacklevel[3] __________________________________________ self = <pandas.tests.indexing.test_chaining_and_caching.TestChaining object at 0x0000022FC3FBFB80>, rhs = 3 @pytest.mark.parametrize("rhs", [3, DataFrame({0: [1, 2, 3, 4]})]) def test_detect_chained_assignment_warning_stacklevel(self, rhs): # GH#42570 df = DataFrame(np.arange(25).reshape(5, 5)) chained = df.loc[:3] with option_context("chained_assignment", "warn"): with tm.assert_produces_warning(com.SettingWithCopyWarning) as t: chained[2] = rhs > assert t[0].filename == __file__ E AssertionError: assert 'c:\\Users\\b...nd_caching.py' == 'C:\\Users\\b...nd_caching.py' E - C:\Users\bdrum\Development\python\pandas\pandas\tests\indexing\test_chaining_and_caching.py E ? ^ E + c:\Users\bdrum\Development\python\pandas\pandas\tests\indexing\test_chaining_and_caching.py E ? ^ pandas\tests\indexing\test_chaining_and_caching.py:444: AssertionError ------------------------------------- generated xml file: C:\Users\bdrum\Development\python\pandas\test-data.xml -------------------------------------- ================================================================ slowest 30 durations ================================================================= 0.29s call pandas/tests/indexing/test_chaining_and_caching.py::TestChaining::test_detect_chained_assignment_warning_stacklevel[3] (2 durations < 0.005s hidden. Use -vv to show these durations.) 
=============================================================== short test summary info =============================================================== FAILED pandas/tests/indexing/test_chaining_and_caching.py::TestChaining::test_detect_chained_assignment_warning_stacklevel[3] - AssertionError: asser... ========================================================== 1 failed, 31 deselected in 0.84s =========================================================== </details> - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry already there This is only part of solutiion in order to close regression. I will create separate issue that describes global SparseArray indices problem. Current behavior as expected in #45110 ~~~python s = pd.arrays.SparseArray([1, 2, 3, 4, np.nan, np.nan], fill_value=np.nan) s[s>2] # [3.0, 4.0] # Fill: nan # IntIndex # Indices: array([0, 1]) ~~~
https://api.github.com/repos/pandas-dev/pandas/pulls/45125
2021-12-30T13:03:30Z
2022-01-08T21:00:54Z
2022-01-08T21:00:54Z
2022-01-09T06:30:10Z
BUG: IntegerArray/FloatingArray ufunc with 'out' kwd
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 22d6ae94863a1..b6ae804aa27b3 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -939,6 +939,7 @@ ExtensionArray - Bug in :func:`array` failing to preserve :class:`PandasArray` (:issue:`43887`) - NumPy ufuncs ``np.abs``, ``np.positive``, ``np.negative`` now correctly preserve dtype when called on ExtensionArrays that implement ``__abs__, __pos__, __neg__``, respectively. In particular this is fixed for :class:`TimedeltaArray` (:issue:`43899`, :issue:`23316`) - NumPy ufuncs ``np.minimum.reduce`` ``np.maximum.reduce``, ``np.add.reduce``, and ``np.prod.reduce`` now work correctly instead of raising ``NotImplementedError`` on :class:`Series` with ``IntegerDtype`` or ``FloatDtype`` (:issue:`43923`, :issue:`44793`) +- NumPy ufuncs with ``out`` keyword are now supported by arrays with ``IntegerDtype`` and ``FloatingDtype`` (:issue:`??`) - Avoid raising ``PerformanceWarning`` about fragmented DataFrame when using many columns with an extension dtype (:issue:`44098`) - Bug in :class:`IntegerArray` and :class:`FloatingArray` construction incorrectly coercing mismatched NA values (e.g. ``np.timedelta64("NaT")``) to numeric NA (:issue:`44514`) - Bug in :meth:`BooleanArray.__eq__` and :meth:`BooleanArray.__ne__` raising ``TypeError`` on comparison with an incompatible type (like a string). This caused :meth:`DataFrame.replace` to sometimes raise a ``TypeError`` if a nullable boolean column was included (:issue:`44499`) diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 782fad435c1c5..91415758bda3c 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -433,6 +433,12 @@ def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): if result is not NotImplemented: return result + if "out" in kwargs: + # e.g. 
test_ufunc_with_out + return arraylike.dispatch_ufunc_with_out( + self, ufunc, method, *inputs, **kwargs + ) + if method == "reduce": result = arraylike.dispatch_reduction_ufunc( self, ufunc, method, *inputs, **kwargs diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 8a3d892876b5c..4a673fcc6439a 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -92,14 +92,15 @@ def mask_missing(arr: ArrayLike, values_to_mask) -> npt.NDArray[np.bool_]: # GH#29553 prevent numpy deprecation warnings pass else: - mask |= arr == x + new_mask = arr == x + if not isinstance(new_mask, np.ndarray): + # usually BooleanArray + new_mask = new_mask.to_numpy(dtype=bool, na_value=False) + mask |= new_mask if na_mask.any(): mask |= isna(arr) - if not isinstance(mask, np.ndarray): - # e.g. if arr is IntegerArray, then mask is BooleanArray - mask = mask.to_numpy(dtype=bool, na_value=False) return mask diff --git a/pandas/tests/arrays/masked_shared.py b/pandas/tests/arrays/masked_shared.py index 091412e1ec686..454be70cf405b 100644 --- a/pandas/tests/arrays/masked_shared.py +++ b/pandas/tests/arrays/masked_shared.py @@ -1,6 +1,8 @@ """ Tests shared by MaskedArray subclasses. """ +import numpy as np +import pytest import pandas as pd import pandas._testing as tm @@ -102,3 +104,35 @@ def test_compare_to_string(self, dtype): expected = pd.Series([False, pd.NA], dtype="boolean") self.assert_series_equal(result, expected) + + def test_ufunc_with_out(self, dtype): + arr = pd.array([1, 2, 3], dtype=dtype) + arr2 = pd.array([1, 2, pd.NA], dtype=dtype) + + mask = arr == arr + mask2 = arr2 == arr2 + + result = np.zeros(3, dtype=bool) + result |= mask + # If MaskedArray.__array_ufunc__ handled "out" appropriately, + # `result` should still be an ndarray. + assert isinstance(result, np.ndarray) + assert result.all() + + # result |= mask worked because mask could be cast lossslessly to + # boolean ndarray. 
mask2 can't, so this raises + result = np.zeros(3, dtype=bool) + msg = "Specify an appropriate 'na_value' for this dtype" + with pytest.raises(ValueError, match=msg): + result |= mask2 + + # addition + res = np.add(arr, arr2) + expected = pd.array([2, 4, pd.NA], dtype=dtype) + tm.assert_extension_array_equal(res, expected) + + # when passing out=arr, we will modify 'arr' inplace. + res = np.add(arr, arr2, out=arr) + assert res is arr + tm.assert_extension_array_equal(res, expected) + tm.assert_extension_array_equal(arr, expected)
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/45122
2021-12-30T02:23:20Z
2021-12-31T16:37:07Z
2021-12-31T16:37:07Z
2021-12-31T19:01:47Z
BUG: Series.__setitem__ failing to cast numeric values
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 22d6ae94863a1..65f4d7099e336 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -800,6 +800,7 @@ Indexing - Bug in :meth:`IntervalIndex.get_indexer_non_unique` not handling targets of ``dtype`` 'object' with NaNs correctly (:issue:`44482`) - Fixed regression where a single column ``np.matrix`` was no longer coerced to a 1d ``np.ndarray`` when added to a :class:`DataFrame` (:issue:`42376`) - Bug in :meth:`Series.__getitem__` with a :class:`CategoricalIndex` of integers treating lists of integers as positional indexers, inconsistent with the behavior with a single scalar integer (:issue:`15470`, :issue:`14865`) +- Bug in :meth:`Series.__setitem__` when setting floats or integers into integer-dtype series failing to upcast when necessary to retain precision (:issue:`45121`) - Missing diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 4f4eac828fd60..f18f1c760ca28 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -2209,6 +2209,12 @@ def can_hold_element(arr: ArrayLike, element: Any) -> bool: # Anything other than integer we cannot hold return False elif dtype.itemsize < tipo.itemsize: + if is_integer(element): + # e.g. test_setitem_series_int8 if we have a python int 1 + # tipo may be np.int32, despite the fact that it will fit + # in smaller int dtypes. + info = np.iinfo(dtype) + return info.min <= element <= info.max return False elif not isinstance(tipo, np.dtype): # i.e. 
nullable IntegerDtype; we can put this into an ndarray diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 252534a0cb790..9b35d9ce39ec6 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -92,6 +92,7 @@ ) from pandas.core.dtypes.cast import ( + can_hold_element, construct_1d_arraylike_from_scalar, construct_2d_arraylike_from_scalar, find_common_type, @@ -99,7 +100,6 @@ invalidate_string_dtypes, maybe_box_native, maybe_downcast_to_dtype, - validate_numeric_casting, ) from pandas.core.dtypes.common import ( ensure_platform_int, @@ -3865,7 +3865,9 @@ def _set_value( series = self._get_item_cache(col) loc = self.index.get_loc(index) - validate_numeric_casting(series.dtype, value) + if not can_hold_element(series._values, value): + # We'll go through loc and end up casting. + raise TypeError series._mgr.setitem_inplace(loc, value) # Note: trying to use series._set_value breaks tests in diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index d2b02a5fac0cb..f84d797b0deb6 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -69,7 +69,6 @@ find_common_type, infer_dtype_from, maybe_cast_pointwise_result, - validate_numeric_casting, ) from pandas.core.dtypes.common import ( ensure_int64, @@ -5589,7 +5588,8 @@ def set_value(self, arr, key, value): stacklevel=find_stack_level(), ) loc = self._engine.get_loc(key) - validate_numeric_casting(arr.dtype, value) + if not can_hold_element(arr, value): + raise ValueError arr[loc] = value _index_shared_docs[ diff --git a/pandas/core/series.py b/pandas/core/series.py index fa1ac0fcfb82d..81b901b13a42b 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -62,10 +62,10 @@ ) from pandas.core.dtypes.cast import ( + can_hold_element, convert_dtypes, maybe_box_native, maybe_cast_pointwise_result, - validate_numeric_casting, ) from pandas.core.dtypes.common import ( ensure_platform_int, @@ -1143,9 +1143,9 @@ def __setitem__(self, key, value) -> None: def 
_set_with_engine(self, key, value) -> None: loc = self.index.get_loc(key) - # error: Argument 1 to "validate_numeric_casting" has incompatible type - # "Union[dtype, ExtensionDtype]"; expected "dtype" - validate_numeric_casting(self.dtype, value) # type: ignore[arg-type] + if not can_hold_element(self._values, value): + raise ValueError + # this is equivalent to self._values[key] = value self._mgr.setitem_inplace(loc, value) diff --git a/pandas/tests/dtypes/cast/test_can_hold_element.py b/pandas/tests/dtypes/cast/test_can_hold_element.py index 3a486f795f23e..906123b1aee74 100644 --- a/pandas/tests/dtypes/cast/test_can_hold_element.py +++ b/pandas/tests/dtypes/cast/test_can_hold_element.py @@ -53,3 +53,18 @@ def test_can_hold_element_int_values_float_ndarray(): # integer but not losslessly castable to int64 element = np.array([3, 2 ** 65], dtype=np.float64) assert not can_hold_element(arr, element) + + +def test_can_hold_element_int8_int(): + arr = np.array([], dtype=np.int8) + + element = 2 + assert can_hold_element(arr, element) + assert can_hold_element(arr, np.int8(element)) + assert can_hold_element(arr, np.uint8(element)) + assert can_hold_element(arr, np.int16(element)) + assert can_hold_element(arr, np.uint16(element)) + assert can_hold_element(arr, np.int32(element)) + assert can_hold_element(arr, np.uint32(element)) + assert can_hold_element(arr, np.int64(element)) + assert can_hold_element(arr, np.uint64(element)) diff --git a/pandas/tests/frame/indexing/test_set_value.py b/pandas/tests/frame/indexing/test_set_value.py index b8150c26aa6bb..7b68566bab225 100644 --- a/pandas/tests/frame/indexing/test_set_value.py +++ b/pandas/tests/frame/indexing/test_set_value.py @@ -1,5 +1,4 @@ import numpy as np -import pytest from pandas.core.dtypes.common import is_float_dtype @@ -38,9 +37,9 @@ def test_set_value_resize(self, float_frame): res._set_value("foobar", "baz", 5) assert is_float_dtype(res["baz"]) assert isna(res["baz"].drop(["foobar"])).all() - msg = "could 
not convert string to float: 'sam'" - with pytest.raises(ValueError, match=msg): - res._set_value("foobar", "baz", "sam") + + res._set_value("foobar", "baz", "sam") + assert res.loc["foobar", "baz"] == "sam" def test_set_value_with_index_dtype_change(self): df_orig = DataFrame(np.random.randn(3, 3), index=range(3), columns=list("ABC")) diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 014f0f5933387..1ace46b0ca5c9 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -110,38 +110,26 @@ def test_setitem_series_object(self, val, exp_dtype): "val,exp_dtype", [(1, np.int64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)], ) - def test_setitem_series_int64(self, val, exp_dtype, request): + def test_setitem_series_int64(self, val, exp_dtype): obj = pd.Series([1, 2, 3, 4]) assert obj.dtype == np.int64 - if exp_dtype is np.float64: - exp = pd.Series([1, 1, 3, 4]) - self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64) - mark = pytest.mark.xfail(reason="GH12747 The result must be float") - request.node.add_marker(mark) - exp = pd.Series([1, val, 3, 4]) self._assert_setitem_series_conversion(obj, val, exp, exp_dtype) @pytest.mark.parametrize( "val,exp_dtype", [(np.int32(1), np.int8), (np.int16(2 ** 9), np.int16)] ) - def test_setitem_series_int8(self, val, exp_dtype, request): + def test_setitem_series_int8(self, val, exp_dtype): obj = pd.Series([1, 2, 3, 4], dtype=np.int8) assert obj.dtype == np.int8 - if exp_dtype is np.int16: - exp = pd.Series([1, 0, 3, 4], dtype=np.int8) - self._assert_setitem_series_conversion(obj, val, exp, np.int8) - mark = pytest.mark.xfail( - reason="BUG: it must be pd.Series([1, 1, 3, 4], dtype=np.int16" - ) - request.node.add_marker(mark) - warn = None if exp_dtype is np.int8 else FutureWarning msg = "Values are too large to be losslessly cast to int8" with tm.assert_produces_warning(warn, match=msg): exp = pd.Series([1, val, 3, 4], 
dtype=np.int8) + + exp = pd.Series([1, val, 3, 4], dtype=exp_dtype) self._assert_setitem_series_conversion(obj, val, exp, exp_dtype) @pytest.mark.parametrize(
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry Two cases this fixes (both tested in test_coersion) ``` ser = pd.Series([1, 2, 3, 4]) ser[1] = 1.1 >>> ser[1] 1.1 # <- PR 1 # <- master; note that setting with loc or iloc *does* cast ser2 = pd.Series([1, 2, 3, 4], dtype=np.int8) ser2[1] = np.int16(512) >>> ser2[1] 512 # <- PR; casts to int16 0 # <- master; still np.int8; note that setting with with loc or iloc *does* cast ``` Downside: can_hold_element is heavier-weight than validate_numeric_casting.
https://api.github.com/repos/pandas-dev/pandas/pulls/45121
2021-12-30T02:05:16Z
2021-12-31T15:20:29Z
2021-12-31T15:20:29Z
2021-12-31T16:05:09Z
CI: Migrate Python 3.10 testing to Posix GHA/Azure Pipelines
diff --git a/.github/workflows/posix.yml b/.github/workflows/posix.yml index 86c25642f4b2a..135ca0703de8b 100644 --- a/.github/workflows/posix.yml +++ b/.github/workflows/posix.yml @@ -33,9 +33,11 @@ jobs: [actions-38.yaml, "not slow and not clipboard", "", "", "", "", ""], [actions-38.yaml, "slow", "", "", "", "", ""], [actions-pypy-38.yaml, "not slow and not clipboard", "", "", "", "", "--max-worker-restart 0"], - [actions-39-numpydev.yaml, "not slow and not network", "xsel", "", "", "deprecate", "-W error"], [actions-39.yaml, "slow", "", "", "", "", ""], - [actions-39.yaml, "not slow and not clipboard", "", "", "", "", ""] + [actions-39.yaml, "not slow and not clipboard", "", "", "", "", ""], + [actions-310-numpydev.yaml, "not slow and not network", "xclip", "", "", "deprecate", "-W error"], + [actions-310.yaml, "not slow and not clipboard", "", "", "", "", ""], + [actions-310.yaml, "slow", "", "", "", "", ""], ] fail-fast: false env: diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml index 37d8b8474d962..fa1eee2db6fc3 100644 --- a/.github/workflows/python-dev.yml +++ b/.github/workflows/python-dev.yml @@ -1,3 +1,11 @@ +# This file is purposely frozen(does not run). DO NOT DELETE IT +# Unfreeze(by commentingthe if: false() condition) once the +# next Python Dev version has released beta 1 and both Cython and numpy support it +# After that Python has released, migrate the workflows to the +# posix GHA workflows/Azure pipelines and "freeze" this file by +# uncommenting the if: false() condition +# Feel free to modify this comment as necessary. 
+ name: Python Dev on: @@ -21,13 +29,14 @@ env: jobs: build: + if: false # Comment this line out to "unfreeze" runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: [ubuntu-latest, macOS-latest, windows-latest] - name: actions-310-dev + name: actions-311-dev timeout-minutes: 80 concurrency: @@ -43,7 +52,7 @@ jobs: - name: Set up Python Dev Version uses: actions/setup-python@v2 with: - python-version: '3.10-dev' + python-version: '3.11-dev' # TODO: GH#44980 https://github.com/pypa/setuptools/issues/2941 - name: Install dependencies diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml index b7c36bb87353b..02a4a9ad44865 100644 --- a/ci/azure/posix.yml +++ b/ci/azure/posix.yml @@ -18,6 +18,26 @@ jobs: CONDA_PY: "38" PATTERN: "not slow" PYTEST_TARGET: "pandas/tests/[i-z]*" + py39_macos_1: + ENV_FILE: ci/deps/azure-macos-39.yaml + CONDA_PY: "39" + PATTERN: "not slow" + PYTEST_TARGET: "pandas/tests/[a-h]*" + py39_macos_2: + ENV_FILE: ci/deps/azure-macos-39.yaml + CONDA_PY: "39" + PATTERN: "not slow" + PYTEST_TARGET: "pandas/tests/[i-z]*" + py310_macos_1: + ENV_FILE: ci/deps/azure-macos-310.yaml + CONDA_PY: "310" + PATTERN: "not slow" + PYTEST_TARGET: "pandas/tests/[a-h]*" + py310_macos_2: + ENV_FILE: ci/deps/azure-macos-310.yaml + CONDA_PY: "310" + PATTERN: "not slow" + PYTEST_TARGET: "pandas/tests/[i-z]*" steps: - script: echo '##vso[task.prependpath]$(HOME)/miniconda3/bin' diff --git a/ci/azure/windows.yml b/ci/azure/windows.yml index 7f3efb5a4dbf3..7061a266f28c7 100644 --- a/ci/azure/windows.yml +++ b/ci/azure/windows.yml @@ -36,6 +36,20 @@ jobs: PYTEST_WORKERS: 2 # GH-42236 PYTEST_TARGET: "pandas/tests/[i-z]*" + py310_1: + ENV_FILE: ci/deps/azure-windows-310.yaml + CONDA_PY: "310" + PATTERN: "not slow and not high_memory" + PYTEST_WORKERS: 2 # GH-42236 + PYTEST_TARGET: "pandas/tests/[a-h]*" + + py310_2: + ENV_FILE: ci/deps/azure-windows-310.yaml + CONDA_PY: "310" + PATTERN: "not slow and not high_memory" + PYTEST_WORKERS: 2 # GH-42236 + PYTEST_TARGET: 
"pandas/tests/[i-z]*" + steps: - powershell: | Write-Host "##vso[task.prependpath]$env:CONDA\Scripts" diff --git a/ci/deps/actions-39-numpydev.yaml b/ci/deps/actions-310-numpydev.yaml similarity index 95% rename from ci/deps/actions-39-numpydev.yaml rename to ci/deps/actions-310-numpydev.yaml index 4a6acf55e265f..3e32665d5433f 100644 --- a/ci/deps/actions-39-numpydev.yaml +++ b/ci/deps/actions-310-numpydev.yaml @@ -2,7 +2,7 @@ name: pandas-dev channels: - defaults dependencies: - - python=3.9 + - python=3.10 # tools - pytest>=6.0 diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml new file mode 100644 index 0000000000000..9829380620f86 --- /dev/null +++ b/ci/deps/actions-310.yaml @@ -0,0 +1,51 @@ +name: pandas-dev +channels: + - conda-forge +dependencies: + - python=3.9 + + # test dependencies + - cython=0.29.24 + - pytest>=6.0 + - pytest-cov + - pytest-xdist>=1.31 + - hypothesis>=5.5.3 + - psutil + + # required dependencies + - python-dateutil + - numpy + - pytz + + # optional dependencies + - beautifulsoup4 + - blosc + - bottleneck + - fastparquet + - fsspec + - html5lib + - gcsfs + - jinja2 + - lxml + - matplotlib + # TODO: uncomment after numba supports py310 + #- numba + - numexpr + - openpyxl + - odfpy + - pandas-gbq + - psycopg2 + - pymysql + - pytables + - pyarrow + - pyreadstat + - pyxlsb + - s3fs + - scipy + - sqlalchemy + - tabulate + - xarray + - xlrd + - xlsxwriter + - xlwt + - zstandard diff --git a/ci/deps/azure-macos-310.yaml b/ci/deps/azure-macos-310.yaml new file mode 100644 index 0000000000000..312fac8091db6 --- /dev/null +++ b/ci/deps/azure-macos-310.yaml @@ -0,0 +1,36 @@ +name: pandas-dev +channels: + - defaults + - conda-forge +dependencies: + - python=3.10 + + # tools + - cython>=0.29.24 + - pytest>=6.0 + - pytest-xdist>=1.31 + - hypothesis>=5.5.3 + - pytest-azurepipelines + + # pandas dependencies + - beautifulsoup4 + - bottleneck + - html5lib + - jinja2 + - lxml + - matplotlib + - nomkl + - numexpr + - numpy + - openpyxl + - 
pyarrow + - pyreadstat + - pytables + - python-dateutil==2.8.1 + - pytz + - pyxlsb + - xarray + - xlrd + - xlsxwriter + - xlwt + - zstandard diff --git a/ci/deps/azure-macos-38.yaml b/ci/deps/azure-macos-38.yaml index 472dc8754d13e..422aa86c57fc7 100644 --- a/ci/deps/azure-macos-38.yaml +++ b/ci/deps/azure-macos-38.yaml @@ -6,6 +6,7 @@ dependencies: - python=3.8 # tools + - cython>=0.29.24 - pytest>=6.0 - pytest-xdist>=1.31 - hypothesis>=5.5.3 @@ -33,6 +34,3 @@ dependencies: - xlsxwriter - xlwt - zstandard - - pip - - pip: - - cython>=0.29.24 diff --git a/ci/deps/azure-macos-39.yaml b/ci/deps/azure-macos-39.yaml new file mode 100644 index 0000000000000..a0860ef536953 --- /dev/null +++ b/ci/deps/azure-macos-39.yaml @@ -0,0 +1,36 @@ +name: pandas-dev +channels: + - defaults + - conda-forge +dependencies: + - python=3.9 + + # tools + - cython>=0.29.24 + - pytest>=6.0 + - pytest-xdist>=1.31 + - hypothesis>=5.5.3 + - pytest-azurepipelines + + # pandas dependencies + - beautifulsoup4 + - bottleneck + - html5lib + - jinja2 + - lxml + - matplotlib=3.3.2 + - nomkl + - numexpr + - numpy=1.21.3 + - openpyxl + - pyarrow=1.0.1 + - pyreadstat + - pytables + - python-dateutil==2.8.1 + - pytz + - pyxlsb + - xarray + - xlrd + - xlsxwriter + - xlwt + - zstandard diff --git a/ci/deps/azure-windows-310.yaml b/ci/deps/azure-windows-310.yaml new file mode 100644 index 0000000000000..8e6f4deef6057 --- /dev/null +++ b/ci/deps/azure-windows-310.yaml @@ -0,0 +1,41 @@ +name: pandas-dev +channels: + - conda-forge + - defaults +dependencies: + - python=3.10 + + # tools + - cython>=0.29.24 + - pytest>=6.0 + - pytest-xdist>=1.31 + - hypothesis>=5.5.3 + - pytest-azurepipelines + + # pandas dependencies + - beautifulsoup4 + - bottleneck + - fsspec>=0.8.0 + - gcsfs + - html5lib + - jinja2 + - lxml + - matplotlib + # TODO: uncomment after numba supports py310 + #- numba + - numexpr + - numpy + - openpyxl + - pyarrow + - pytables + - python-dateutil + - pytz + - s3fs>=0.4.2 + - scipy + - sqlalchemy + 
- xlrd + - xlsxwriter + - xlwt + - pyreadstat + - pyxlsb + - zstandard diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py index 6c07366e402d6..ff247349ff4d5 100644 --- a/pandas/tests/plotting/frame/test_frame.py +++ b/pandas/tests/plotting/frame/test_frame.py @@ -682,7 +682,7 @@ def test_raise_error_on_datetime_time_data(self): # GH 8113, datetime.time type is not supported by matplotlib in scatter df = DataFrame(np.random.randn(10), columns=["a"]) df["dtime"] = date_range(start="2014-01-01", freq="h", periods=10).time - msg = "must be a string or a number, not 'datetime.time'" + msg = "must be a string or a (real )?number, not 'datetime.time'" with pytest.raises(TypeError, match=msg): df.plot(kind="scatter", x="dtime", y="a")
- [ ] closes #43890 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/45120
2021-12-30T00:33:50Z
2022-01-17T13:42:04Z
2022-01-17T13:42:03Z
2022-01-20T20:17:04Z
Improving ambiguous doc string
diff --git a/pandas/_libs/reshape.pyx b/pandas/_libs/reshape.pyx index 9d3b80b321537..924a7ad9a0751 100644 --- a/pandas/_libs/reshape.pyx +++ b/pandas/_libs/reshape.pyx @@ -87,7 +87,7 @@ def explode(ndarray[object] values): Parameters ---------- - values : object ndarray + values : ndarray[object] Returns -------
null
https://api.github.com/repos/pandas-dev/pandas/pulls/45119
2021-12-30T00:23:16Z
2021-12-30T01:13:23Z
2021-12-30T01:13:23Z
2021-12-30T01:13:28Z
CI/TST: Set deadline=None for flaky hypothesis test
diff --git a/pandas/tests/tseries/offsets/test_offsets_properties.py b/pandas/tests/tseries/offsets/test_offsets_properties.py index 1b4fa9292c403..5071c816c313d 100644 --- a/pandas/tests/tseries/offsets/test_offsets_properties.py +++ b/pandas/tests/tseries/offsets/test_offsets_properties.py @@ -10,6 +10,7 @@ from hypothesis import ( assume, given, + settings, ) import pytest import pytz @@ -45,6 +46,7 @@ def test_on_offset_implementations(dt, offset): @given(YQM_OFFSET) +@settings(deadline=None) # GH 45118 def test_shift_across_dst(offset): # GH#18319 check that 1) timezone is correctly normalized and # 2) that hour is not incorrectly changed by this normalization
xref: https://github.com/pandas-dev/pandas/runs/4662296945?check_suite_focus=true
https://api.github.com/repos/pandas-dev/pandas/pulls/45118
2021-12-29T23:06:46Z
2021-12-30T00:21:03Z
2021-12-30T00:21:03Z
2021-12-30T01:20:08Z
REF: avoid object-casting in _get_codes_for_values
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index bc83c723fae4a..25b9a1212a1ef 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -298,7 +298,7 @@ def _get_values_for_rank(values: ArrayLike) -> np.ndarray: return values -def get_data_algo(values: ArrayLike): +def _get_data_algo(values: ArrayLike): values = _get_values_for_rank(values) ndtype = _check_object_for_strings(values) @@ -555,7 +555,7 @@ def factorize_array( codes : ndarray[np.intp] uniques : ndarray """ - hash_klass, values = get_data_algo(values) + hash_klass, values = _get_data_algo(values) table = hash_klass(size_hint or len(values)) uniques, codes = table.factorize( @@ -1747,7 +1747,7 @@ def safe_sort( if sorter is None: # mixed types - hash_klass, values = get_data_algo(values) + hash_klass, values = _get_data_algo(values) t = hash_klass(len(values)) t.map_locations(values) sorter = ensure_platform_int(t.lookup(ordered)) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index acace9085e18e..0188fedbf7741 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -55,7 +55,6 @@ ) from pandas.core.dtypes.common import ( ensure_int64, - ensure_object, ensure_platform_int, is_categorical_dtype, is_datetime64_dtype, @@ -93,7 +92,6 @@ import pandas.core.algorithms as algorithms from pandas.core.algorithms import ( factorize, - get_data_algo, take_nd, unique1d, ) @@ -2749,8 +2747,6 @@ def _get_codes_for_values(values, categories: Index) -> np.ndarray: If `values` is known to be a Categorical, use recode_for_categories instead. 
""" - dtype_equal = is_dtype_equal(values.dtype, categories.dtype) - if values.ndim > 1: flat = values.ravel() codes = _get_codes_for_values(flat, categories) @@ -2762,30 +2758,9 @@ def _get_codes_for_values(values, categories: Index) -> np.ndarray: # Categorical(array[Period, Period], categories=PeriodIndex(...)) cls = categories.dtype.construct_array_type() values = maybe_cast_to_extension_array(cls, values) - if not isinstance(values, cls): - # exception raised in _from_sequence - values = ensure_object(values) - # error: Incompatible types in assignment (expression has type - # "ndarray", variable has type "Index") - categories = ensure_object(categories) # type: ignore[assignment] - elif not dtype_equal: - values = ensure_object(values) - # error: Incompatible types in assignment (expression has type "ndarray", - # variable has type "Index") - categories = ensure_object(categories) # type: ignore[assignment] - - if isinstance(categories, ABCIndex): - return coerce_indexer_dtype(categories.get_indexer_for(values), categories) - - # Only hit here when we've already coerced to object dtypee. 
- - hash_klass, vals = get_data_algo(values) - # pandas/core/arrays/categorical.py:2661: error: Argument 1 to "get_data_algo" has - # incompatible type "Index"; expected "Union[ExtensionArray, ndarray]" [arg-type] - _, cats = get_data_algo(categories) # type: ignore[arg-type] - t = hash_klass(len(cats)) - t.map_locations(cats) - return coerce_indexer_dtype(t.lookup(vals), cats) + + codes = categories.get_indexer_for(values) + return coerce_indexer_dtype(codes, categories) def recode_for_categories( diff --git a/pandas/tests/io/parser/dtypes/test_categorical.py b/pandas/tests/io/parser/dtypes/test_categorical.py index 7e04dd570e812..3b8c520004f12 100644 --- a/pandas/tests/io/parser/dtypes/test_categorical.py +++ b/pandas/tests/io/parser/dtypes/test_categorical.py @@ -269,7 +269,6 @@ def test_categorical_coerces_timestamp(all_parsers): tm.assert_frame_equal(result, expected) -@xfail_pyarrow def test_categorical_coerces_timedelta(all_parsers): parser = all_parsers dtype = {"b": CategoricalDtype(pd.to_timedelta(["1H", "2H", "3H"]))}
Index.get_indexer_for ends up going through the same hashtable-based lookup anyway. We add a little constructor overhead, but avoid object-dtype casting in cases where the common-dtype is e.g. numeric.
https://api.github.com/repos/pandas-dev/pandas/pulls/45117
2021-12-29T22:56:06Z
2021-12-30T15:53:26Z
2021-12-30T15:53:26Z
2021-12-30T15:54:25Z
BUG: to_xml raising for pd.NA
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 22d6ae94863a1..3924191bebcfd 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -856,6 +856,7 @@ I/O - Bug in :func:`to_csv` always coercing datetime columns with different formats to the same format (:issue:`21734`) - :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` with ``compression`` set to ``'zip'`` no longer create a zip file containing a file ending with ".zip". Instead, they try to infer the inner file name more smartly. (:issue:`39465`) - Bug in :func:`read_csv` where reading a mixed column of booleans and missing values to a float type results in the missing values becoming 1.0 rather than NaN (:issue:`42808`, :issue:`34120`) +- Bug in :func:`to_xml` raising error for ``pd.NA`` with extension array dtype (:issue:`43903`) - Bug in :func:`read_csv` when passing simultaneously a parser in ``date_parser`` and ``parse_dates=False``, the parsing was still called (:issue:`44366`) - Bug in :func:`read_csv` not setting name of :class:`MultiIndex` columns correctly when ``index_col`` is not the first column (:issue:`38549`) - Bug in :func:`read_csv` silently ignoring errors when failing to create a memory-mapped file (:issue:`44766`) diff --git a/pandas/io/formats/xml.py b/pandas/io/formats/xml.py index 8e05afaa06919..1b11bb12757bb 100644 --- a/pandas/io/formats/xml.py +++ b/pandas/io/formats/xml.py @@ -18,6 +18,7 @@ from pandas.util._decorators import doc from pandas.core.dtypes.common import is_list_like +from pandas.core.dtypes.missing import isna from pandas.core.frame import DataFrame from pandas.core.shared_docs import _shared_docs @@ -571,9 +572,7 @@ def build_elems(self) -> None: elem_name = f"{self.prefix_uri}{flat_col}" try: val = ( - None - if self.d[col] in [None, ""] or self.d[col] != self.d[col] - else str(self.d[col]) + None if isna(self.d[col]) or self.d[col] == "" else str(self.d[col]) ) SubElement(self.elem_row, elem_name).text = 
val except KeyError: diff --git a/pandas/tests/io/xml/__init__.py b/pandas/tests/io/xml/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/io/xml/test_to_xml.py b/pandas/tests/io/xml/test_to_xml.py index e0c2b3794a00c..c8828c08dba44 100644 --- a/pandas/tests/io/xml/test_to_xml.py +++ b/pandas/tests/io/xml/test_to_xml.py @@ -12,6 +12,7 @@ import pandas.util._test_decorators as td from pandas import ( + NA, DataFrame, Index, ) @@ -1307,15 +1308,25 @@ def test_filename_and_suffix_comp(parser, compression_only): assert geom_xml == output.strip() +@td.skip_if_no("lxml") +def test_ea_dtypes(any_numeric_ea_dtype): + # GH#43903 + expected = """<?xml version='1.0' encoding='utf-8'?> +<data> + <row> + <index>0</index> + <a/> + </row> +</data>""" + df = DataFrame({"a": [NA]}).astype(any_numeric_ea_dtype) + result = df.to_xml() + assert result.strip() == expected + + def test_unsuported_compression(datapath, parser): with pytest.raises(ValueError, match="Unrecognized compression type"): with tm.ensure_clean() as path: - # Argument "compression" to "to_xml" of "DataFrame" has incompatible type - # "Literal['7z']"; expected "Union[Literal['infer'], Literal['gzip'], - # Literal['bz2'], Literal['zip'], Literal['xz'], Dict[str, Any], None]" - geom_df.to_xml( - path, parser=parser, compression="7z" # type: ignore[arg-type] - ) + geom_df.to_xml(path, parser=parser, compression="7z") # STORAGE OPTIONS diff --git a/pandas/tests/io/xml/test_xml.py b/pandas/tests/io/xml/test_xml.py index 30ba95fd82bf2..b72111ec6cf1e 100644 --- a/pandas/tests/io/xml/test_xml.py +++ b/pandas/tests/io/xml/test_xml.py @@ -684,9 +684,7 @@ def test_names_option_wrong_type(datapath, parser): filename = datapath("io", "data", "xml", "books.xml") with pytest.raises(TypeError, match=("is not a valid type for names")): - read_xml( - filename, names="Col1, Col2, Col3", parser=parser # type: ignore[arg-type] - ) + read_xml(filename, names="Col1, Col2, Col3", parser=parser) # 
ENCODING @@ -1056,10 +1054,7 @@ def test_wrong_compression(parser, compression, compression_only): def test_unsuported_compression(datapath, parser): with pytest.raises(ValueError, match="Unrecognized compression type"): with tm.ensure_clean() as path: - # error: Argument "compression" to "read_xml" has incompatible type - # "Literal['7z']"; expected "Union[Literal['infer'], Literal['gzip'], - # Literal['bz2'], Literal['zip'], Literal['xz'], Dict[str, Any], None]" - read_xml(path, parser=parser, compression="7z") # type: ignore[arg-type] + read_xml(path, parser=parser, compression="7z") # STORAGE OPTIONS
- [x] closes #43903 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/45116
2021-12-29T22:20:17Z
2021-12-30T14:10:52Z
2021-12-30T14:10:51Z
2021-12-30T14:13:33Z
TST: More pytest.mark.parameterize
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 8b677bde2f7e2..7cf2721621a03 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -775,16 +775,6 @@ def test_constructor_dict_of_generators(self): tm.assert_frame_equal(result, expected) def test_constructor_dict_multiindex(self): - def check(result, expected): - return tm.assert_frame_equal( - result, - expected, - check_dtype=True, - check_index_type=True, - check_column_type=True, - check_names=True, - ) - d = { ("a", "a"): {("i", "i"): 0, ("i", "j"): 1, ("j", "i"): 2}, ("b", "a"): {("i", "i"): 6, ("i", "j"): 5, ("j", "i"): 4}, @@ -796,7 +786,10 @@ def check(result, expected): [x[1] for x in _d], index=MultiIndex.from_tuples([x[0] for x in _d]) ).T expected.index = MultiIndex.from_tuples(expected.index) - check(df, expected) + tm.assert_frame_equal( + df, + expected, + ) d["z"] = {"y": 123.0, ("i", "i"): 111, ("i", "j"): 111, ("j", "i"): 111} _d.insert(0, ("z", d["z"])) @@ -806,7 +799,7 @@ def check(result, expected): expected.index = Index(expected.index, tupleize_cols=False) df = DataFrame(d) df = df.reindex(columns=expected.columns, index=expected.index) - check(df, expected) + tm.assert_frame_equal(df, expected) def test_constructor_dict_datetime64_index(self): # GH 10160 @@ -2167,44 +2160,38 @@ def test_constructor_series_copy(self, float_frame): assert not (series["A"] == 5).all() - def test_constructor_with_nas(self): + @pytest.mark.parametrize( + "df", + [ + DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan]), + DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan]), + DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]], columns=[np.nan, 1.1, 2.2, np.nan]), + DataFrame( + [[0.0, 1, 2, 3.0], [4, 5, 6, 7]], columns=[np.nan, 1.1, 2.2, np.nan] + ), + DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]], columns=[np.nan, 1, 2, 2]), + ], + ) + def test_constructor_with_nas(self, df): # GH 5016 # na's in indices + # GH 21428 
(non-unique columns) - def check(df): - for i in range(len(df.columns)): - df.iloc[:, i] - - indexer = np.arange(len(df.columns))[isna(df.columns)] - - # No NaN found -> error - if len(indexer) == 0: - with pytest.raises(KeyError, match="^nan$"): - df.loc[:, np.nan] - # single nan should result in Series - elif len(indexer) == 1: - tm.assert_series_equal(df.iloc[:, indexer[0]], df.loc[:, np.nan]) - # multiple nans should result in DataFrame - else: - tm.assert_frame_equal(df.iloc[:, indexer], df.loc[:, np.nan]) - - df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan]) - check(df) - - df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan]) - check(df) - - df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]], columns=[np.nan, 1.1, 2.2, np.nan]) - check(df) + for i in range(len(df.columns)): + df.iloc[:, i] - df = DataFrame( - [[0.0, 1, 2, 3.0], [4, 5, 6, 7]], columns=[np.nan, 1.1, 2.2, np.nan] - ) - check(df) + indexer = np.arange(len(df.columns))[isna(df.columns)] - # GH 21428 (non-unique columns) - df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]], columns=[np.nan, 1, 2, 2]) - check(df) + # No NaN found -> error + if len(indexer) == 0: + with pytest.raises(KeyError, match="^nan$"): + df.loc[:, np.nan] + # single nan should result in Series + elif len(indexer) == 1: + tm.assert_series_equal(df.iloc[:, indexer[0]], df.loc[:, np.nan]) + # multiple nans should result in DataFrame + else: + tm.assert_frame_equal(df.iloc[:, indexer], df.loc[:, np.nan]) def test_constructor_lists_to_object_dtype(self): # from #1074 diff --git a/pandas/tests/groupby/test_libgroupby.py b/pandas/tests/groupby/test_libgroupby.py index 06e3e4f2877d6..2a24448d24ce2 100644 --- a/pandas/tests/groupby/test_libgroupby.py +++ b/pandas/tests/groupby/test_libgroupby.py @@ -130,35 +130,32 @@ class TestGroupVarFloat32(GroupVarTestMixin): rtol = 1e-2 -def test_group_ohlc(): - def _check(dtype): - obj = np.array(np.random.randn(20), dtype=dtype) +@pytest.mark.parametrize("dtype", ["float32", 
"float64"]) +def test_group_ohlc(dtype): + obj = np.array(np.random.randn(20), dtype=dtype) - bins = np.array([6, 12, 20]) - out = np.zeros((3, 4), dtype) - counts = np.zeros(len(out), dtype=np.int64) - labels = ensure_platform_int(np.repeat(np.arange(3), np.diff(np.r_[0, bins]))) + bins = np.array([6, 12, 20]) + out = np.zeros((3, 4), dtype) + counts = np.zeros(len(out), dtype=np.int64) + labels = ensure_platform_int(np.repeat(np.arange(3), np.diff(np.r_[0, bins]))) - func = libgroupby.group_ohlc - func(out, counts, obj[:, None], labels) + func = libgroupby.group_ohlc + func(out, counts, obj[:, None], labels) - def _ohlc(group): - if isna(group).all(): - return np.repeat(np.nan, 4) - return [group[0], group.max(), group.min(), group[-1]] + def _ohlc(group): + if isna(group).all(): + return np.repeat(np.nan, 4) + return [group[0], group.max(), group.min(), group[-1]] - expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]), _ohlc(obj[12:])]) + expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]), _ohlc(obj[12:])]) - tm.assert_almost_equal(out, expected) - tm.assert_numpy_array_equal(counts, np.array([6, 6, 8], dtype=np.int64)) + tm.assert_almost_equal(out, expected) + tm.assert_numpy_array_equal(counts, np.array([6, 6, 8], dtype=np.int64)) - obj[:6] = np.nan - func(out, counts, obj[:, None], labels) - expected[0] = np.nan - tm.assert_almost_equal(out, expected) - - _check("float32") - _check("float64") + obj[:6] = np.nan + func(out, counts, obj[:, None], labels) + expected[0] = np.nan + tm.assert_almost_equal(out, expected) def _check_cython_group_transform_cumulative(pd_op, np_op, dtype): diff --git a/pandas/tests/indexes/multi/test_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py index ee517a667d832..6ec4d1fac8c8a 100644 --- a/pandas/tests/indexes/multi/test_duplicates.py +++ b/pandas/tests/indexes/multi/test_duplicates.py @@ -178,49 +178,44 @@ def test_has_duplicates_from_tuples(): assert not mi.has_duplicates -def test_has_duplicates_overflow(): 
+@pytest.mark.parametrize("nlevels", [4, 8]) +@pytest.mark.parametrize("with_nulls", [True, False]) +def test_has_duplicates_overflow(nlevels, with_nulls): # handle int64 overflow if possible - def check(nlevels, with_nulls): - codes = np.tile(np.arange(500), 2) - level = np.arange(500) + # no overflow with 4 + # overflow possible with 8 + codes = np.tile(np.arange(500), 2) + level = np.arange(500) - if with_nulls: # inject some null values - codes[500] = -1 # common nan value - codes = [codes.copy() for i in range(nlevels)] - for i in range(nlevels): - codes[i][500 + i - nlevels // 2] = -1 + if with_nulls: # inject some null values + codes[500] = -1 # common nan value + codes = [codes.copy() for i in range(nlevels)] + for i in range(nlevels): + codes[i][500 + i - nlevels // 2] = -1 - codes += [np.array([-1, 1]).repeat(500)] - else: - codes = [codes] * nlevels + [np.arange(2).repeat(500)] + codes += [np.array([-1, 1]).repeat(500)] + else: + codes = [codes] * nlevels + [np.arange(2).repeat(500)] - levels = [level] * nlevels + [[0, 1]] + levels = [level] * nlevels + [[0, 1]] - # no dups - mi = MultiIndex(levels=levels, codes=codes) - assert not mi.has_duplicates - - # with a dup - if with_nulls: - - def f(a): - return np.insert(a, 1000, a[0]) + # no dups + mi = MultiIndex(levels=levels, codes=codes) + assert not mi.has_duplicates - codes = list(map(f, codes)) - mi = MultiIndex(levels=levels, codes=codes) - else: - values = mi.values.tolist() - mi = MultiIndex.from_tuples(values + [values[0]]) + # with a dup + if with_nulls: - assert mi.has_duplicates + def f(a): + return np.insert(a, 1000, a[0]) - # no overflow - check(4, False) - check(4, True) + codes = list(map(f, codes)) + mi = MultiIndex(levels=levels, codes=codes) + else: + values = mi.values.tolist() + mi = MultiIndex.from_tuples(values + [values[0]]) - # overflow possible - check(8, False) - check(8, True) + assert mi.has_duplicates @pytest.mark.parametrize( diff --git 
a/pandas/tests/io/pytables/test_append.py b/pandas/tests/io/pytables/test_append.py index e6dfe64df3864..5544b8112627b 100644 --- a/pandas/tests/io/pytables/test_append.py +++ b/pandas/tests/io/pytables/test_append.py @@ -670,14 +670,10 @@ def test_append_misc(setup_path): result = store.select("df1") tm.assert_frame_equal(result, df) - # more chunksize in append tests - def check(obj, comparator): - for c in [10, 200, 1000]: - with ensure_clean_store(setup_path, mode="w") as store: - store.append("obj", obj, chunksize=c) - result = store.select("obj") - comparator(result, obj) +@pytest.mark.parametrize("chunksize", [10, 200, 1000]) +def test_append_misc_chunksize(setup_path, chunksize): + # more chunksize in append tests df = tm.makeDataFrame() df["string"] = "foo" df["float322"] = 1.0 @@ -685,8 +681,13 @@ def check(obj, comparator): df["bool"] = df["float322"] > 0 df["time1"] = Timestamp("20130101") df["time2"] = Timestamp("20130102") - check(df, tm.assert_frame_equal) + with ensure_clean_store(setup_path, mode="w") as store: + store.append("obj", df, chunksize=chunksize) + result = store.select("obj") + tm.assert_frame_equal(result, df) + +def test_append_misc_empty_frame(setup_path): # empty frame, GH4273 with ensure_clean_store(setup_path) as store: diff --git a/pandas/tests/io/pytables/test_file_handling.py b/pandas/tests/io/pytables/test_file_handling.py index df0f7b0951b7d..027c3d0bd821c 100644 --- a/pandas/tests/io/pytables/test_file_handling.py +++ b/pandas/tests/io/pytables/test_file_handling.py @@ -29,71 +29,64 @@ pytestmark = pytest.mark.single -def test_mode(setup_path): +@pytest.mark.parametrize("mode", ["r", "r+", "a", "w"]) +def test_mode(setup_path, mode): df = tm.makeTimeDataFrame() + msg = r"[\S]* does not exist" + with ensure_clean_path(setup_path) as path: - def check(mode): - - msg = r"[\S]* does not exist" - with ensure_clean_path(setup_path) as path: - - # constructor - if mode in ["r", "r+"]: - with pytest.raises(OSError, match=msg): - 
HDFStore(path, mode=mode) + # constructor + if mode in ["r", "r+"]: + with pytest.raises(OSError, match=msg): + HDFStore(path, mode=mode) - else: - store = HDFStore(path, mode=mode) - assert store._handle.mode == mode - store.close() + else: + store = HDFStore(path, mode=mode) + assert store._handle.mode == mode + store.close() - with ensure_clean_path(setup_path) as path: + with ensure_clean_path(setup_path) as path: - # context - if mode in ["r", "r+"]: - with pytest.raises(OSError, match=msg): - with HDFStore(path, mode=mode) as store: - pass - else: + # context + if mode in ["r", "r+"]: + with pytest.raises(OSError, match=msg): with HDFStore(path, mode=mode) as store: - assert store._handle.mode == mode + pass + else: + with HDFStore(path, mode=mode) as store: + assert store._handle.mode == mode - with ensure_clean_path(setup_path) as path: + with ensure_clean_path(setup_path) as path: - # conv write - if mode in ["r", "r+"]: - with pytest.raises(OSError, match=msg): - df.to_hdf(path, "df", mode=mode) - df.to_hdf(path, "df", mode="w") - else: + # conv write + if mode in ["r", "r+"]: + with pytest.raises(OSError, match=msg): df.to_hdf(path, "df", mode=mode) - - # conv read - if mode in ["w"]: - msg = ( - "mode w is not allowed while performing a read. " - r"Allowed modes are r, r\+ and a." - ) - with pytest.raises(ValueError, match=msg): - read_hdf(path, "df", mode=mode) - else: - result = read_hdf(path, "df", mode=mode) - tm.assert_frame_equal(result, df) - - def check_default_mode(): - - # read_hdf uses default mode - with ensure_clean_path(setup_path) as path: df.to_hdf(path, "df", mode="w") - result = read_hdf(path, "df") + else: + df.to_hdf(path, "df", mode=mode) + + # conv read + if mode in ["w"]: + msg = ( + "mode w is not allowed while performing a read. " + r"Allowed modes are r, r\+ and a." 
+ ) + with pytest.raises(ValueError, match=msg): + read_hdf(path, "df", mode=mode) + else: + result = read_hdf(path, "df", mode=mode) tm.assert_frame_equal(result, df) - check("r") - check("r+") - check("a") - check("w") - check_default_mode() + +def test_default_mode(setup_path): + # read_hdf uses default mode + df = tm.makeTimeDataFrame() + with ensure_clean_path(setup_path) as path: + df.to_hdf(path, "df", mode="w") + result = read_hdf(path, "df") + tm.assert_frame_equal(result, df) def test_reopen_handle(setup_path):
- [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/45115
2021-12-29T22:03:01Z
2021-12-30T14:08:56Z
2021-12-30T14:08:56Z
2021-12-30T17:56:49Z
TST: Parameterize test_rank.py
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 87af6152b8189..4820fcce6486b 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -94,64 +94,92 @@ def test_frame_in_list(self): with pytest.raises(ValueError, match=msg): df in [None] - def test_comparison_invalid(self): - def check(df, df2): - - for (x, y) in [(df, df2), (df2, df)]: - # we expect the result to match Series comparisons for - # == and !=, inequalities should raise - result = x == y - expected = DataFrame( - {col: x[col] == y[col] for col in x.columns}, - index=x.index, - columns=x.columns, - ) - tm.assert_frame_equal(result, expected) - - result = x != y - expected = DataFrame( - {col: x[col] != y[col] for col in x.columns}, - index=x.index, - columns=x.columns, - ) - tm.assert_frame_equal(result, expected) - - msgs = [ - r"Invalid comparison between dtype=datetime64\[ns\] and ndarray", - "invalid type promotion", - ( - # npdev 1.20.0 - r"The DTypes <class 'numpy.dtype\[.*\]'> and " - r"<class 'numpy.dtype\[.*\]'> do not have a common DType." 
- ), - ] - msg = "|".join(msgs) - with pytest.raises(TypeError, match=msg): - x >= y - with pytest.raises(TypeError, match=msg): - x > y - with pytest.raises(TypeError, match=msg): - x < y - with pytest.raises(TypeError, match=msg): - x <= y - + @pytest.mark.parametrize( + "arg, arg2", + [ + [ + { + "a": np.random.randint(10, size=10), + "b": pd.date_range("20010101", periods=10), + }, + { + "a": np.random.randint(10, size=10), + "b": np.random.randint(10, size=10), + }, + ], + [ + { + "a": np.random.randint(10, size=10), + "b": np.random.randint(10, size=10), + }, + { + "a": np.random.randint(10, size=10), + "b": pd.date_range("20010101", periods=10), + }, + ], + [ + { + "a": pd.date_range("20010101", periods=10), + "b": pd.date_range("20010101", periods=10), + }, + { + "a": np.random.randint(10, size=10), + "b": np.random.randint(10, size=10), + }, + ], + [ + { + "a": np.random.randint(10, size=10), + "b": pd.date_range("20010101", periods=10), + }, + { + "a": pd.date_range("20010101", periods=10), + "b": pd.date_range("20010101", periods=10), + }, + ], + ], + ) + def test_comparison_invalid(self, arg, arg2): # GH4968 # invalid date/int comparisons - df = DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"]) - df["dates"] = pd.date_range("20010101", periods=len(df)) - - df2 = df.copy() - df2["dates"] = df["a"] - check(df, df2) + x = DataFrame(arg) + y = DataFrame(arg2) + # we expect the result to match Series comparisons for + # == and !=, inequalities should raise + result = x == y + expected = DataFrame( + {col: x[col] == y[col] for col in x.columns}, + index=x.index, + columns=x.columns, + ) + tm.assert_frame_equal(result, expected) - df = DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"]) - df2 = DataFrame( - { - "a": pd.date_range("20010101", periods=len(df)), - "b": pd.date_range("20100101", periods=len(df)), - } + result = x != y + expected = DataFrame( + {col: x[col] != y[col] for col in x.columns}, + index=x.index, + 
columns=x.columns, ) - check(df, df2) + tm.assert_frame_equal(result, expected) + + msgs = [ + r"Invalid comparison between dtype=datetime64\[ns\] and ndarray", + "invalid type promotion", + ( + # npdev 1.20.0 + r"The DTypes <class 'numpy.dtype\[.*\]'> and " + r"<class 'numpy.dtype\[.*\]'> do not have a common DType." + ), + ] + msg = "|".join(msgs) + with pytest.raises(TypeError, match=msg): + x >= y + with pytest.raises(TypeError, match=msg): + x > y + with pytest.raises(TypeError, match=msg): + x < y + with pytest.raises(TypeError, match=msg): + x <= y def test_timestamp_compare(self): # make sure we can compare Timestamps on the right AND left hand side diff --git a/pandas/tests/series/methods/test_rank.py b/pandas/tests/series/methods/test_rank.py index 43b210a50dab2..d85b84bec55ac 100644 --- a/pandas/tests/series/methods/test_rank.py +++ b/pandas/tests/series/methods/test_rank.py @@ -1,7 +1,5 @@ -from itertools import ( - chain, - product, -) +from itertools import chain +import operator import numpy as np import pytest @@ -22,17 +20,25 @@ from pandas.api.types import CategoricalDtype -class TestSeriesRank: - s = Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]) +@pytest.fixture +def ser(): + return Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]) + + +@pytest.fixture( + params=[ + ["average", np.array([1.5, 5.5, 7.0, 3.5, np.nan, 3.5, 1.5, 8.0, np.nan, 5.5])], + ["min", np.array([1, 5, 7, 3, np.nan, 3, 1, 8, np.nan, 5])], + ["max", np.array([2, 6, 7, 4, np.nan, 4, 2, 8, np.nan, 6])], + ["first", np.array([1, 5, 7, 3, np.nan, 4, 2, 8, np.nan, 6])], + ["dense", np.array([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3])], + ] +) +def results(request): + return request.param - results = { - "average": np.array([1.5, 5.5, 7.0, 3.5, np.nan, 3.5, 1.5, 8.0, np.nan, 5.5]), - "min": np.array([1, 5, 7, 3, np.nan, 3, 1, 8, np.nan, 5]), - "max": np.array([2, 6, 7, 4, np.nan, 4, 2, 8, np.nan, 6]), - "first": np.array([1, 5, 7, 3, np.nan, 4, 2, 8, np.nan, 6]), - "dense": 
np.array([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]), - } +class TestSeriesRank: @td.skip_if_no_scipy def test_rank(self, datetime_series): from scipy.stats import rankdata @@ -216,61 +222,49 @@ def test_rank_signature(self): with pytest.raises(ValueError, match=msg): s.rank("average") - def test_rank_tie_methods(self): - s = self.s - - def _check(s, expected, method="average"): - result = s.rank(method=method) - tm.assert_series_equal(result, Series(expected)) - - dtypes = [None, object] - disabled = {(object, "first")} - results = self.results - - for method, dtype in product(results, dtypes): - if (dtype, method) in disabled: - continue - series = s if dtype is None else s.astype(dtype) - _check(series, results[method], method=method) + @pytest.mark.parametrize("dtype", [None, object]) + def test_rank_tie_methods(self, ser, results, dtype): + method, exp = results + ser = ser if dtype is None else ser.astype(dtype) + result = ser.rank(method=method) + tm.assert_series_equal(result, Series(exp)) @td.skip_if_no_scipy @pytest.mark.parametrize("ascending", [True, False]) @pytest.mark.parametrize("method", ["average", "min", "max", "first", "dense"]) @pytest.mark.parametrize("na_option", ["top", "bottom", "keep"]) - def test_rank_tie_methods_on_infs_nans(self, method, na_option, ascending): - dtypes = [ + @pytest.mark.parametrize( + "dtype, na_value, pos_inf, neg_inf", + [ ("object", None, Infinity(), NegInfinity()), ("float64", np.nan, np.inf, -np.inf), - ] + ], + ) + def test_rank_tie_methods_on_infs_nans( + self, method, na_option, ascending, dtype, na_value, pos_inf, neg_inf + ): chunk = 3 - disabled = {("object", "first")} - - def _check(s, method, na_option, ascending): - exp_ranks = { - "average": ([2, 2, 2], [5, 5, 5], [8, 8, 8]), - "min": ([1, 1, 1], [4, 4, 4], [7, 7, 7]), - "max": ([3, 3, 3], [6, 6, 6], [9, 9, 9]), - "first": ([1, 2, 3], [4, 5, 6], [7, 8, 9]), - "dense": ([1, 1, 1], [2, 2, 2], [3, 3, 3]), - } - ranks = exp_ranks[method] - if na_option == 
"top": - order = [ranks[1], ranks[0], ranks[2]] - elif na_option == "bottom": - order = [ranks[0], ranks[2], ranks[1]] - else: - order = [ranks[0], [np.nan] * chunk, ranks[1]] - expected = order if ascending else order[::-1] - expected = list(chain.from_iterable(expected)) - result = s.rank(method=method, na_option=na_option, ascending=ascending) - tm.assert_series_equal(result, Series(expected, dtype="float64")) - - for dtype, na_value, pos_inf, neg_inf in dtypes: - in_arr = [neg_inf] * chunk + [na_value] * chunk + [pos_inf] * chunk - iseries = Series(in_arr, dtype=dtype) - if (dtype, method) in disabled: - continue - _check(iseries, method, na_option, ascending) + + in_arr = [neg_inf] * chunk + [na_value] * chunk + [pos_inf] * chunk + iseries = Series(in_arr, dtype=dtype) + exp_ranks = { + "average": ([2, 2, 2], [5, 5, 5], [8, 8, 8]), + "min": ([1, 1, 1], [4, 4, 4], [7, 7, 7]), + "max": ([3, 3, 3], [6, 6, 6], [9, 9, 9]), + "first": ([1, 2, 3], [4, 5, 6], [7, 8, 9]), + "dense": ([1, 1, 1], [2, 2, 2], [3, 3, 3]), + } + ranks = exp_ranks[method] + if na_option == "top": + order = [ranks[1], ranks[0], ranks[2]] + elif na_option == "bottom": + order = [ranks[0], ranks[2], ranks[1]] + else: + order = [ranks[0], [np.nan] * chunk, ranks[1]] + expected = order if ascending else order[::-1] + expected = list(chain.from_iterable(expected)) + result = iseries.rank(method=method, na_option=na_option, ascending=ascending) + tm.assert_series_equal(result, Series(expected, dtype="float64")) def test_rank_desc_mix_nans_infs(self): # GH 19538 @@ -281,7 +275,16 @@ def test_rank_desc_mix_nans_infs(self): tm.assert_series_equal(result, exp) @td.skip_if_no_scipy - def test_rank_methods_series(self): + @pytest.mark.parametrize("method", ["average", "min", "max", "first", "dense"]) + @pytest.mark.parametrize( + "op, value", + [ + [operator.add, 0], + [operator.add, 1e6], + [operator.mul, 1e-6], + ], + ) + def test_rank_methods_series(self, method, op, value): from scipy.stats import 
rankdata xs = np.random.randn(9) @@ -289,19 +292,17 @@ def test_rank_methods_series(self): np.random.shuffle(xs) index = [chr(ord("a") + i) for i in range(len(xs))] + vals = op(xs, value) + ts = Series(vals, index=index) + result = ts.rank(method=method) + sprank = rankdata(vals, method if method != "first" else "ordinal") + expected = Series(sprank, index=index).astype("float64") + tm.assert_series_equal(result, expected) - for vals in [xs, xs + 1e6, xs * 1e-6]: - ts = Series(vals, index=index) - - for m in ["average", "min", "max", "first", "dense"]: - result = ts.rank(method=m) - sprank = rankdata(vals, m if m != "first" else "ordinal") - expected = Series(sprank, index=index).astype("float64") - tm.assert_series_equal(result, expected) - - def test_rank_dense_method(self): - dtypes = ["O", "f8", "i8"] - in_out = [ + @pytest.mark.parametrize("dtype", ["O", "f8", "i8"]) + @pytest.mark.parametrize( + "ser, exp", + [ ([1], [1]), ([2], [1]), ([0], [1]), @@ -310,43 +311,38 @@ def test_rank_dense_method(self): ([4, 2, 1], [3, 2, 1]), ([1, 1, 5, 5, 3], [1, 1, 3, 3, 2]), ([-5, -4, -3, -2, -1], [1, 2, 3, 4, 5]), - ] - - for ser, exp in in_out: - for dtype in dtypes: - s = Series(ser).astype(dtype) - result = s.rank(method="dense") - expected = Series(exp).astype(result.dtype) - tm.assert_series_equal(result, expected) - - def test_rank_descending(self): - dtypes = ["O", "f8", "i8"] - - for dtype, method in product(dtypes, self.results): - if "i" in dtype: - s = self.s.dropna() - else: - s = self.s.astype(dtype) - - res = s.rank(ascending=False) - expected = (s.max() - s).rank() - tm.assert_series_equal(res, expected) - - if method == "first" and dtype == "O": - continue - - expected = (s.max() - s).rank(method=method) - res2 = s.rank(method=method, ascending=False) - tm.assert_series_equal(res2, expected) - - def test_rank_int(self): - s = self.s.dropna().astype("i8") - - for method, res in self.results.items(): - result = s.rank(method=method) - expected = 
Series(res).dropna() - expected.index = result.index - tm.assert_series_equal(result, expected) + ], + ) + def test_rank_dense_method(self, dtype, ser, exp): + s = Series(ser).astype(dtype) + result = s.rank(method="dense") + expected = Series(exp).astype(result.dtype) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["O", "f8", "i8"]) + def test_rank_descending(self, ser, results, dtype): + method, _ = results + if "i" in dtype: + s = ser.dropna() + else: + s = ser.astype(dtype) + + res = s.rank(ascending=False) + expected = (s.max() - s).rank() + tm.assert_series_equal(res, expected) + + expected = (s.max() - s).rank(method=method) + res2 = s.rank(method=method, ascending=False) + tm.assert_series_equal(res2, expected) + + def test_rank_int(self, ser, results): + method, exp = results + s = ser.dropna().astype("i8") + + result = s.rank(method=method) + expected = Series(exp).dropna() + expected.index = result.index + tm.assert_series_equal(result, expected) def test_rank_object_bug(self): # GH 13445
- [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/45114
2021-12-29T20:33:48Z
2021-12-29T23:55:51Z
2021-12-29T23:55:51Z
2021-12-30T01:19:13Z