content stringlengths 1 103k ⌀ | path stringlengths 8 216 | filename stringlengths 2 179 | language stringclasses 15
values | size_bytes int64 2 189k | quality_score float64 0.5 0.95 | complexity float64 0 1 | documentation_ratio float64 0 1 | repository stringclasses 5
values | stars int64 0 1k | created_date stringdate 2023-07-10 19:21:08 2025-07-09 19:11:45 | license stringclasses 4
values | is_test bool 2
classes | file_hash stringlengths 32 32 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
from datetime import (\n date,\n timedelta,\n timezone,\n)\nfrom decimal import Decimal\nimport operator\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs import lib\nfrom pandas._libs.tslibs import IncompatibleFrequency\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n DatetimeTZDtype,\n Index,\n Series,\n Timedelta,\n bdate_range,\n date_range,\n isna,\n)\nimport pandas._testing as tm\nfrom pandas.core import ops\nfrom pandas.core.computation import expressions as expr\nfrom pandas.core.computation.check import NUMEXPR_INSTALLED\n\n\n@pytest.fixture(autouse=True, params=[0, 1000000], ids=["numexpr", "python"])\ndef switch_numexpr_min_elements(request, monkeypatch):\n with monkeypatch.context() as m:\n m.setattr(expr, "_MIN_ELEMENTS", request.param)\n yield\n\n\ndef _permute(obj):\n return obj.take(np.random.default_rng(2).permutation(len(obj)))\n\n\nclass TestSeriesFlexArithmetic:\n @pytest.mark.parametrize(\n "ts",\n [\n (lambda x: x, lambda x: x * 2, False),\n (lambda x: x, lambda x: x[::2], False),\n (lambda x: x, lambda x: 5, True),\n (\n lambda x: Series(range(10), dtype=np.float64),\n lambda x: Series(range(10), dtype=np.float64),\n True,\n ),\n ],\n )\n @pytest.mark.parametrize(\n "opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"]\n )\n def test_flex_method_equivalence(self, opname, ts):\n # check that Series.{opname} behaves like Series.__{opname}__,\n tser = Series(\n np.arange(20, dtype=np.float64),\n index=date_range("2020-01-01", periods=20),\n name="ts",\n )\n\n series = ts[0](tser)\n other = ts[1](tser)\n check_reverse = ts[2]\n\n op = getattr(Series, opname)\n alt = getattr(operator, opname)\n\n result = op(series, other)\n expected = alt(series, other)\n tm.assert_almost_equal(result, expected)\n if check_reverse:\n rop = getattr(Series, "r" + opname)\n result = rop(series, other)\n expected = alt(other, series)\n tm.assert_almost_equal(result, expected)\n\n def test_flex_method_subclass_metadata_preservation(self, 
all_arithmetic_operators):\n # GH 13208\n class MySeries(Series):\n _metadata = ["x"]\n\n @property\n def _constructor(self):\n return MySeries\n\n opname = all_arithmetic_operators\n op = getattr(Series, opname)\n m = MySeries([1, 2, 3], name="test")\n m.x = 42\n result = op(m, 1)\n assert result.x == 42\n\n def test_flex_add_scalar_fill_value(self):\n # GH12723\n ser = Series([0, 1, np.nan, 3, 4, 5])\n\n exp = ser.fillna(0).add(2)\n res = ser.add(2, fill_value=0)\n tm.assert_series_equal(res, exp)\n\n pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)]\n for op in ["add", "sub", "mul", "pow", "truediv", "floordiv"]:\n fv = 0\n lop = getattr(Series, op)\n lequiv = getattr(operator, op)\n rop = getattr(Series, "r" + op)\n # bind op at definition time...\n requiv = lambda x, y, op=op: getattr(operator, op)(y, x)\n pairings.append((lop, lequiv, fv))\n pairings.append((rop, requiv, fv))\n\n @pytest.mark.parametrize("op, equiv_op, fv", pairings)\n def test_operators_combine(self, op, equiv_op, fv):\n def _check_fill(meth, op, a, b, fill_value=0):\n exp_index = a.index.union(b.index)\n a = a.reindex(exp_index)\n b = b.reindex(exp_index)\n\n amask = isna(a)\n bmask = isna(b)\n\n exp_values = []\n for i in range(len(exp_index)):\n with np.errstate(all="ignore"):\n if amask[i]:\n if bmask[i]:\n exp_values.append(np.nan)\n continue\n exp_values.append(op(fill_value, b[i]))\n elif bmask[i]:\n if amask[i]:\n exp_values.append(np.nan)\n continue\n exp_values.append(op(a[i], fill_value))\n else:\n exp_values.append(op(a[i], b[i]))\n\n result = meth(a, b, fill_value=fill_value)\n expected = Series(exp_values, exp_index)\n tm.assert_series_equal(result, expected)\n\n a = Series([np.nan, 1.0, 2.0, 3.0, np.nan], index=np.arange(5))\n b = Series([np.nan, 1, np.nan, 3, np.nan, 4.0], index=np.arange(6))\n\n result = op(a, b)\n exp = equiv_op(a, b)\n tm.assert_series_equal(result, exp)\n _check_fill(op, equiv_op, a, b, fill_value=fv)\n # should accept axis=0 
or axis='rows'\n op(a, b, axis=0)\n\n\nclass TestSeriesArithmetic:\n # Some of these may end up in tests/arithmetic, but are not yet sorted\n\n def test_add_series_with_period_index(self):\n rng = pd.period_range("1/1/2000", "1/1/2010", freq="Y")\n ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng)\n\n result = ts + ts[::2]\n expected = ts + ts\n expected.iloc[1::2] = np.nan\n tm.assert_series_equal(result, expected)\n\n result = ts + _permute(ts[::2])\n tm.assert_series_equal(result, expected)\n\n msg = "Input has different freq=D from Period\\(freq=Y-DEC\\)"\n with pytest.raises(IncompatibleFrequency, match=msg):\n ts + ts.asfreq("D", how="end")\n\n @pytest.mark.parametrize(\n "target_add,input_value,expected_value",\n [\n ("!", ["hello", "world"], ["hello!", "world!"]),\n ("m", ["hello", "world"], ["hellom", "worldm"]),\n ],\n )\n def test_string_addition(self, target_add, input_value, expected_value):\n # GH28658 - ensure adding 'm' does not raise an error\n a = Series(input_value)\n\n result = a + target_add\n expected = Series(expected_value)\n tm.assert_series_equal(result, expected)\n\n def test_divmod(self):\n # GH#25557\n a = Series([1, 1, 1, np.nan], index=["a", "b", "c", "d"])\n b = Series([2, np.nan, 1, np.nan], index=["a", "b", "d", "e"])\n\n result = a.divmod(b)\n expected = divmod(a, b)\n tm.assert_series_equal(result[0], expected[0])\n tm.assert_series_equal(result[1], expected[1])\n\n result = a.rdivmod(b)\n expected = divmod(b, a)\n tm.assert_series_equal(result[0], expected[0])\n tm.assert_series_equal(result[1], expected[1])\n\n @pytest.mark.parametrize("index", [None, range(9)])\n def test_series_integer_mod(self, index):\n # GH#24396\n s1 = Series(range(1, 10))\n s2 = Series("foo", index=index)\n\n msg = "not all arguments converted during string formatting|'mod' not supported"\n\n with pytest.raises(TypeError, match=msg):\n s2 % s1\n\n def test_add_with_duplicate_index(self):\n # GH14227\n s1 = Series([1, 2], 
index=[1, 1])\n s2 = Series([10, 10], index=[1, 2])\n result = s1 + s2\n expected = Series([11, 12, np.nan], index=[1, 1, 2])\n tm.assert_series_equal(result, expected)\n\n def test_add_na_handling(self):\n ser = Series(\n [Decimal("1.3"), Decimal("2.3")], index=[date(2012, 1, 1), date(2012, 1, 2)]\n )\n\n result = ser + ser.shift(1)\n result2 = ser.shift(1) + ser\n assert isna(result.iloc[0])\n assert isna(result2.iloc[0])\n\n def test_add_corner_cases(self, datetime_series):\n empty = Series([], index=Index([]), dtype=np.float64)\n\n result = datetime_series + empty\n assert np.isnan(result).all()\n\n result = empty + empty.copy()\n assert len(result) == 0\n\n def test_add_float_plus_int(self, datetime_series):\n # float + int\n int_ts = datetime_series.astype(int)[:-5]\n added = datetime_series + int_ts\n expected = Series(\n datetime_series.values[:-5] + int_ts.values,\n index=datetime_series.index[:-5],\n name="ts",\n )\n tm.assert_series_equal(added[:-5], expected)\n\n def test_mul_empty_int_corner_case(self):\n s1 = Series([], [], dtype=np.int32)\n s2 = Series({"x": 0.0})\n tm.assert_series_equal(s1 * s2, Series([np.nan], index=["x"]))\n\n def test_sub_datetimelike_align(self):\n # GH#7500\n # datetimelike ops need to align\n dt = Series(date_range("2012-1-1", periods=3, freq="D"))\n dt.iloc[2] = np.nan\n dt2 = dt[::-1]\n\n expected = Series([timedelta(0), timedelta(0), pd.NaT])\n # name is reset\n result = dt2 - dt\n tm.assert_series_equal(result, expected)\n\n expected = Series(expected, name=0)\n result = (dt2.to_frame() - dt.to_frame())[0]\n tm.assert_series_equal(result, expected)\n\n def test_alignment_doesnt_change_tz(self):\n # GH#33671\n dti = date_range("2016-01-01", periods=10, tz="CET")\n dti_utc = dti.tz_convert("UTC")\n ser = Series(10, index=dti)\n ser_utc = Series(10, index=dti_utc)\n\n # we don't care about the result, just that original indexes are unchanged\n ser * ser_utc\n\n assert ser.index is dti\n assert ser_utc.index is dti_utc\n\n 
def test_alignment_categorical(self):\n # GH13365\n cat = Categorical(["3z53", "3z53", "LoJG", "LoJG", "LoJG", "N503"])\n ser1 = Series(2, index=cat)\n ser2 = Series(2, index=cat[:-1])\n result = ser1 * ser2\n\n exp_index = ["3z53"] * 4 + ["LoJG"] * 9 + ["N503"]\n exp_index = pd.CategoricalIndex(exp_index, categories=cat.categories)\n exp_values = [4.0] * 13 + [np.nan]\n expected = Series(exp_values, exp_index)\n\n tm.assert_series_equal(result, expected)\n\n def test_arithmetic_with_duplicate_index(self):\n # GH#8363\n # integer ops with a non-unique index\n index = [2, 2, 3, 3, 4]\n ser = Series(np.arange(1, 6, dtype="int64"), index=index)\n other = Series(np.arange(5, dtype="int64"), index=index)\n result = ser - other\n expected = Series(1, index=[2, 2, 3, 3, 4])\n tm.assert_series_equal(result, expected)\n\n # GH#8363\n # datetime ops with a non-unique index\n ser = Series(date_range("20130101 09:00:00", periods=5), index=index)\n other = Series(date_range("20130101", periods=5), index=index)\n result = ser - other\n expected = Series(Timedelta("9 hours"), index=[2, 2, 3, 3, 4])\n tm.assert_series_equal(result, expected)\n\n def test_masked_and_non_masked_propagate_na(self):\n # GH#45810\n ser1 = Series([0, np.nan], dtype="float")\n ser2 = Series([0, 1], dtype="Int64")\n result = ser1 * ser2\n expected = Series([0, pd.NA], dtype="Float64")\n tm.assert_series_equal(result, expected)\n\n def test_mask_div_propagate_na_for_non_na_dtype(self):\n # GH#42630\n ser1 = Series([15, pd.NA, 5, 4], dtype="Int64")\n ser2 = Series([15, 5, np.nan, 4])\n result = ser1 / ser2\n expected = Series([1.0, pd.NA, pd.NA, 1.0], dtype="Float64")\n tm.assert_series_equal(result, expected)\n\n result = ser2 / ser1\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("val, dtype", [(3, "Int64"), (3.5, "Float64")])\n def test_add_list_to_masked_array(self, val, dtype):\n # GH#22962\n ser = Series([1, None, 3], dtype="Int64")\n result = ser + [1, None, val]\n expected = 
Series([2, None, 3 + val], dtype=dtype)\n tm.assert_series_equal(result, expected)\n\n result = [1, None, val] + ser\n tm.assert_series_equal(result, expected)\n\n def test_add_list_to_masked_array_boolean(self, request):\n # GH#22962\n warning = (\n UserWarning\n if request.node.callspec.id == "numexpr" and NUMEXPR_INSTALLED\n else None\n )\n ser = Series([True, None, False], dtype="boolean")\n with tm.assert_produces_warning(warning):\n result = ser + [True, None, True]\n expected = Series([True, None, True], dtype="boolean")\n tm.assert_series_equal(result, expected)\n\n with tm.assert_produces_warning(warning):\n result = [True, None, True] + ser\n tm.assert_series_equal(result, expected)\n\n\n# ------------------------------------------------------------------\n# Comparisons\n\n\nclass TestSeriesFlexComparison:\n @pytest.mark.parametrize("axis", [0, None, "index"])\n def test_comparison_flex_basic(self, axis, comparison_op):\n left = Series(np.random.default_rng(2).standard_normal(10))\n right = Series(np.random.default_rng(2).standard_normal(10))\n result = getattr(left, comparison_op.__name__)(right, axis=axis)\n expected = comparison_op(left, right)\n tm.assert_series_equal(result, expected)\n\n def test_comparison_bad_axis(self, comparison_op):\n left = Series(np.random.default_rng(2).standard_normal(10))\n right = Series(np.random.default_rng(2).standard_normal(10))\n\n msg = "No axis named 1 for object type"\n with pytest.raises(ValueError, match=msg):\n getattr(left, comparison_op.__name__)(right, axis=1)\n\n @pytest.mark.parametrize(\n "values, op",\n [\n ([False, False, True, False], "eq"),\n ([True, True, False, True], "ne"),\n ([False, False, True, False], "le"),\n ([False, False, False, False], "lt"),\n ([False, True, True, False], "ge"),\n ([False, True, False, False], "gt"),\n ],\n )\n def test_comparison_flex_alignment(self, values, op):\n left = Series([1, 3, 2], index=list("abc"))\n right = Series([2, 2, 2], index=list("bcd"))\n result = 
getattr(left, op)(right)\n expected = Series(values, index=list("abcd"))\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "values, op, fill_value",\n [\n ([False, False, True, True], "eq", 2),\n ([True, True, False, False], "ne", 2),\n ([False, False, True, True], "le", 0),\n ([False, False, False, True], "lt", 0),\n ([True, True, True, False], "ge", 0),\n ([True, True, False, False], "gt", 0),\n ],\n )\n def test_comparison_flex_alignment_fill(self, values, op, fill_value):\n left = Series([1, 3, 2], index=list("abc"))\n right = Series([2, 2, 2], index=list("bcd"))\n result = getattr(left, op)(right, fill_value=fill_value)\n expected = Series(values, index=list("abcd"))\n tm.assert_series_equal(result, expected)\n\n\nclass TestSeriesComparison:\n def test_comparison_different_length(self):\n a = Series(["a", "b", "c"])\n b = Series(["b", "a"])\n msg = "only compare identically-labeled Series"\n with pytest.raises(ValueError, match=msg):\n a < b\n\n a = Series([1, 2])\n b = Series([2, 3, 4])\n with pytest.raises(ValueError, match=msg):\n a == b\n\n @pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])\n def test_ser_flex_cmp_return_dtypes(self, opname):\n # GH#15115\n ser = Series([1, 3, 2], index=range(3))\n const = 2\n result = getattr(ser, opname)(const).dtypes\n expected = np.dtype("bool")\n assert result == expected\n\n @pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])\n def test_ser_flex_cmp_return_dtypes_empty(self, opname):\n # GH#15115 empty Series case\n ser = Series([1, 3, 2], index=range(3))\n empty = ser.iloc[:0]\n const = 2\n result = getattr(empty, opname)(const).dtypes\n expected = np.dtype("bool")\n assert result == expected\n\n @pytest.mark.parametrize(\n "names", [(None, None, None), ("foo", "bar", None), ("baz", "baz", "baz")]\n )\n def test_ser_cmp_result_names(self, names, comparison_op):\n # datetime64 dtype\n op = comparison_op\n dti = date_range("1949-06-07 03:00:00", 
freq="h", periods=5, name=names[0])\n ser = Series(dti).rename(names[1])\n result = op(ser, dti)\n assert result.name == names[2]\n\n # datetime64tz dtype\n dti = dti.tz_localize("US/Central")\n dti = pd.DatetimeIndex(dti, freq="infer") # freq not preserved by tz_localize\n ser = Series(dti).rename(names[1])\n result = op(ser, dti)\n assert result.name == names[2]\n\n # timedelta64 dtype\n tdi = dti - dti.shift(1)\n ser = Series(tdi).rename(names[1])\n result = op(ser, tdi)\n assert result.name == names[2]\n\n # interval dtype\n if op in [operator.eq, operator.ne]:\n # interval dtype comparisons not yet implemented\n ii = pd.interval_range(start=0, periods=5, name=names[0])\n ser = Series(ii).rename(names[1])\n result = op(ser, ii)\n assert result.name == names[2]\n\n # categorical\n if op in [operator.eq, operator.ne]:\n # categorical dtype comparisons raise for inequalities\n cidx = tdi.astype("category")\n ser = Series(cidx).rename(names[1])\n result = op(ser, cidx)\n assert result.name == names[2]\n\n def test_comparisons(self):\n s = Series(["a", "b", "c"])\n s2 = Series([False, True, False])\n\n # it works!\n exp = Series([False, False, False])\n tm.assert_series_equal(s == s2, exp)\n tm.assert_series_equal(s2 == s, exp)\n\n # -----------------------------------------------------------------\n # Categorical Dtype Comparisons\n\n def test_categorical_comparisons(self):\n # GH#8938\n # allow equality comparisons\n a = Series(list("abc"), dtype="category")\n b = Series(list("abc"), dtype="object")\n c = Series(["a", "b", "cc"], dtype="object")\n d = Series(list("acb"), dtype="object")\n e = Categorical(list("abc"))\n f = Categorical(list("acb"))\n\n # vs scalar\n assert not (a == "a").all()\n assert ((a != "a") == ~(a == "a")).all()\n\n assert not ("a" == a).all()\n assert (a == "a")[0]\n assert ("a" == a)[0]\n assert not ("a" != a)[0]\n\n # vs list-like\n assert (a == a).all()\n assert not (a != a).all()\n\n assert (a == list(a)).all()\n assert (a == b).all()\n 
assert (b == a).all()\n assert ((~(a == b)) == (a != b)).all()\n assert ((~(b == a)) == (b != a)).all()\n\n assert not (a == c).all()\n assert not (c == a).all()\n assert not (a == d).all()\n assert not (d == a).all()\n\n # vs a cat-like\n assert (a == e).all()\n assert (e == a).all()\n assert not (a == f).all()\n assert not (f == a).all()\n\n assert (~(a == e) == (a != e)).all()\n assert (~(e == a) == (e != a)).all()\n assert (~(a == f) == (a != f)).all()\n assert (~(f == a) == (f != a)).all()\n\n # non-equality is not comparable\n msg = "can only compare equality or not"\n with pytest.raises(TypeError, match=msg):\n a < b\n with pytest.raises(TypeError, match=msg):\n b < a\n with pytest.raises(TypeError, match=msg):\n a > b\n with pytest.raises(TypeError, match=msg):\n b > a\n\n def test_unequal_categorical_comparison_raises_type_error(self):\n # unequal comparison should raise for unordered cats\n cat = Series(Categorical(list("abc")))\n msg = "can only compare equality or not"\n with pytest.raises(TypeError, match=msg):\n cat > "b"\n\n cat = Series(Categorical(list("abc"), ordered=False))\n with pytest.raises(TypeError, match=msg):\n cat > "b"\n\n # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057\n # and following comparisons with scalars not in categories should raise\n # for unequal comps, but not for equal/not equal\n cat = Series(Categorical(list("abc"), ordered=True))\n\n msg = "Invalid comparison between dtype=category and str"\n with pytest.raises(TypeError, match=msg):\n cat < "d"\n with pytest.raises(TypeError, match=msg):\n cat > "d"\n with pytest.raises(TypeError, match=msg):\n "d" < cat\n with pytest.raises(TypeError, match=msg):\n "d" > cat\n\n tm.assert_series_equal(cat == "d", Series([False, False, False]))\n tm.assert_series_equal(cat != "d", Series([True, True, True]))\n\n # -----------------------------------------------------------------\n\n def test_comparison_tuples(self):\n # GH#11339\n # comparisons vs tuple\n s = 
Series([(1, 1), (1, 2)])\n\n result = s == (1, 2)\n expected = Series([False, True])\n tm.assert_series_equal(result, expected)\n\n result = s != (1, 2)\n expected = Series([True, False])\n tm.assert_series_equal(result, expected)\n\n result = s == (0, 0)\n expected = Series([False, False])\n tm.assert_series_equal(result, expected)\n\n result = s != (0, 0)\n expected = Series([True, True])\n tm.assert_series_equal(result, expected)\n\n s = Series([(1, 1), (1, 1)])\n\n result = s == (1, 1)\n expected = Series([True, True])\n tm.assert_series_equal(result, expected)\n\n result = s != (1, 1)\n expected = Series([False, False])\n tm.assert_series_equal(result, expected)\n\n def test_comparison_frozenset(self):\n ser = Series([frozenset([1]), frozenset([1, 2])])\n\n result = ser == frozenset([1])\n expected = Series([True, False])\n tm.assert_series_equal(result, expected)\n\n def test_comparison_operators_with_nas(self, comparison_op):\n ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)\n ser[::2] = np.nan\n\n # test that comparisons work\n val = ser[5]\n\n result = comparison_op(ser, val)\n expected = comparison_op(ser.dropna(), val).reindex(ser.index)\n\n msg = "Downcasting object dtype arrays"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n if comparison_op is operator.ne:\n expected = expected.fillna(True).astype(bool)\n else:\n expected = expected.fillna(False).astype(bool)\n\n tm.assert_series_equal(result, expected)\n\n def test_ne(self):\n ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)\n expected = np.array([True, True, False, True, True])\n tm.assert_numpy_array_equal(ts.index != 5, expected)\n tm.assert_numpy_array_equal(~(ts.index == 5), expected)\n\n @pytest.mark.parametrize(\n "left, right",\n [\n (\n Series([1, 2, 3], index=list("ABC"), name="x"),\n Series([2, 2, 2], index=list("ABD"), name="x"),\n ),\n (\n Series([1, 2, 3], index=list("ABC"), name="x"),\n Series([2, 2, 2, 2], index=list("ABCD"), name="x"),\n ),\n 
],\n )\n def test_comp_ops_df_compat(self, left, right, frame_or_series):\n # GH 1134\n # GH 50083 to clarify that index and columns must be identically labeled\n if frame_or_series is not Series:\n msg = (\n rf"Can only compare identically-labeled \(both index and columns\) "\n f"{frame_or_series.__name__} objects"\n )\n left = left.to_frame()\n right = right.to_frame()\n else:\n msg = (\n f"Can only compare identically-labeled {frame_or_series.__name__} "\n f"objects"\n )\n\n with pytest.raises(ValueError, match=msg):\n left == right\n with pytest.raises(ValueError, match=msg):\n right == left\n\n with pytest.raises(ValueError, match=msg):\n left != right\n with pytest.raises(ValueError, match=msg):\n right != left\n\n with pytest.raises(ValueError, match=msg):\n left < right\n with pytest.raises(ValueError, match=msg):\n right < left\n\n def test_compare_series_interval_keyword(self):\n # GH#25338\n ser = Series(["IntervalA", "IntervalB", "IntervalC"])\n result = ser == "IntervalA"\n expected = Series([True, False, False])\n tm.assert_series_equal(result, expected)\n\n\n# ------------------------------------------------------------------\n# Unsorted\n# These arithmetic tests were previously in other files, eventually\n# should be parametrized and put into tests.arithmetic\n\n\nclass TestTimeSeriesArithmetic:\n def test_series_add_tz_mismatch_converts_to_utc(self):\n rng = date_range("1/1/2011", periods=100, freq="h", tz="utc")\n\n perm = np.random.default_rng(2).permutation(100)[:90]\n ser1 = Series(\n np.random.default_rng(2).standard_normal(90),\n index=rng.take(perm).tz_convert("US/Eastern"),\n )\n\n perm = np.random.default_rng(2).permutation(100)[:90]\n ser2 = Series(\n np.random.default_rng(2).standard_normal(90),\n index=rng.take(perm).tz_convert("Europe/Berlin"),\n )\n\n result = ser1 + ser2\n\n uts1 = ser1.tz_convert("utc")\n uts2 = ser2.tz_convert("utc")\n expected = uts1 + uts2\n\n # sort since input indexes are not equal\n expected = 
expected.sort_index()\n\n assert result.index.tz is timezone.utc\n tm.assert_series_equal(result, expected)\n\n def test_series_add_aware_naive_raises(self):\n rng = date_range("1/1/2011", periods=10, freq="h")\n ser = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng)\n\n ser_utc = ser.tz_localize("utc")\n\n msg = "Cannot join tz-naive with tz-aware DatetimeIndex"\n with pytest.raises(Exception, match=msg):\n ser + ser_utc\n\n with pytest.raises(Exception, match=msg):\n ser_utc + ser\n\n # TODO: belongs in tests/arithmetic?\n def test_datetime_understood(self, unit):\n # Ensures it doesn't fail to create the right series\n # reported in issue#16726\n series = Series(date_range("2012-01-01", periods=3, unit=unit))\n offset = pd.offsets.DateOffset(days=6)\n result = series - offset\n exp_dti = pd.to_datetime(["2011-12-26", "2011-12-27", "2011-12-28"]).as_unit(\n unit\n )\n expected = Series(exp_dti)\n tm.assert_series_equal(result, expected)\n\n def test_align_date_objects_with_datetimeindex(self):\n rng = date_range("1/1/2000", periods=20)\n ts = Series(np.random.default_rng(2).standard_normal(20), index=rng)\n\n ts_slice = ts[5:]\n ts2 = ts_slice.copy()\n ts2.index = [x.date() for x in ts2.index]\n\n result = ts + ts2\n result2 = ts2 + ts\n expected = ts + ts[5:]\n expected.index = expected.index._with_freq(None)\n tm.assert_series_equal(result, expected)\n tm.assert_series_equal(result2, expected)\n\n\nclass TestNamePreservation:\n @pytest.mark.parametrize("box", [list, tuple, np.array, Index, Series, pd.array])\n @pytest.mark.parametrize("flex", [True, False])\n def test_series_ops_name_retention(self, flex, box, names, all_binary_operators):\n # GH#33930 consistent name-retention\n op = all_binary_operators\n\n left = Series(range(10), name=names[0])\n right = Series(range(10), name=names[1])\n\n name = op.__name__.strip("_")\n is_logical = name in ["and", "rand", "xor", "rxor", "or", "ror"]\n\n msg = (\n r"Logical ops \(and, or, xor\) 
between Pandas objects and "\n "dtype-less sequences"\n )\n warn = None\n if box in [list, tuple] and is_logical:\n warn = FutureWarning\n\n right = box(right)\n if flex:\n if is_logical:\n # Series doesn't have these as flex methods\n return\n result = getattr(left, name)(right)\n else:\n # GH#37374 logical ops behaving as set ops deprecated\n with tm.assert_produces_warning(warn, match=msg):\n result = op(left, right)\n\n assert isinstance(result, Series)\n if box in [Index, Series]:\n assert result.name is names[2] or result.name == names[2]\n else:\n assert result.name is names[0] or result.name == names[0]\n\n def test_binop_maybe_preserve_name(self, datetime_series):\n # names match, preserve\n result = datetime_series * datetime_series\n assert result.name == datetime_series.name\n result = datetime_series.mul(datetime_series)\n assert result.name == datetime_series.name\n\n result = datetime_series * datetime_series[:-2]\n assert result.name == datetime_series.name\n\n # names don't match, don't preserve\n cp = datetime_series.copy()\n cp.name = "something else"\n result = datetime_series + cp\n assert result.name is None\n result = datetime_series.add(cp)\n assert result.name is None\n\n ops = ["add", "sub", "mul", "div", "truediv", "floordiv", "mod", "pow"]\n ops = ops + ["r" + op for op in ops]\n for op in ops:\n # names match, preserve\n ser = datetime_series.copy()\n result = getattr(ser, op)(ser)\n assert result.name == datetime_series.name\n\n # names don't match, don't preserve\n cp = datetime_series.copy()\n cp.name = "changed"\n result = getattr(ser, op)(cp)\n assert result.name is None\n\n def test_scalarop_preserve_name(self, datetime_series):\n result = datetime_series * 2\n assert result.name == datetime_series.name\n\n\nclass TestInplaceOperations:\n @pytest.mark.parametrize(\n "dtype1, dtype2, dtype_expected, dtype_mul",\n (\n ("Int64", "Int64", "Int64", "Int64"),\n ("float", "float", "float", "float"),\n ("Int64", "float", "Float64", 
"Float64"),\n ("Int64", "Float64", "Float64", "Float64"),\n ),\n )\n def test_series_inplace_ops(self, dtype1, dtype2, dtype_expected, dtype_mul):\n # GH 37910\n\n ser1 = Series([1], dtype=dtype1)\n ser2 = Series([2], dtype=dtype2)\n ser1 += ser2\n expected = Series([3], dtype=dtype_expected)\n tm.assert_series_equal(ser1, expected)\n\n ser1 -= ser2\n expected = Series([1], dtype=dtype_expected)\n tm.assert_series_equal(ser1, expected)\n\n ser1 *= ser2\n expected = Series([2], dtype=dtype_mul)\n tm.assert_series_equal(ser1, expected)\n\n\ndef test_none_comparison(request, series_with_simple_index):\n series = series_with_simple_index\n\n if len(series) < 1:\n request.applymarker(\n pytest.mark.xfail(reason="Test doesn't make sense on empty data")\n )\n\n # bug brought up by #1079\n # changed from TypeError in 0.17.0\n series.iloc[0] = np.nan\n\n # noinspection PyComparisonWithNone\n result = series == None # noqa: E711\n assert not result.iat[0]\n assert not result.iat[1]\n\n # noinspection PyComparisonWithNone\n result = series != None # noqa: E711\n assert result.iat[0]\n assert result.iat[1]\n\n result = None == series # noqa: E711\n assert not result.iat[0]\n assert not result.iat[1]\n\n result = None != series # noqa: E711\n assert result.iat[0]\n assert result.iat[1]\n\n if lib.is_np_dtype(series.dtype, "M") or isinstance(series.dtype, DatetimeTZDtype):\n # Following DatetimeIndex (and Timestamp) convention,\n # inequality comparisons with Series[datetime64] raise\n msg = "Invalid comparison"\n with pytest.raises(TypeError, match=msg):\n None > series\n with pytest.raises(TypeError, match=msg):\n series > None\n else:\n result = None > series\n assert not result.iat[0]\n assert not result.iat[1]\n\n result = series < None\n assert not result.iat[0]\n assert not result.iat[1]\n\n\ndef test_series_varied_multiindex_alignment():\n # GH 20414\n s1 = Series(\n range(8),\n index=pd.MultiIndex.from_product(\n [list("ab"), list("xy"), [1, 2]], names=["ab", "xy", 
"num"]\n ),\n )\n s2 = Series(\n [1000 * i for i in range(1, 5)],\n index=pd.MultiIndex.from_product([list("xy"), [1, 2]], names=["xy", "num"]),\n )\n result = s1.loc[pd.IndexSlice[["a"], :, :]] + s2\n expected = Series(\n [1000, 2001, 3002, 4003],\n index=pd.MultiIndex.from_tuples(\n [("a", "x", 1), ("a", "x", 2), ("a", "y", 1), ("a", "y", 2)],\n names=["ab", "xy", "num"],\n ),\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_rmod_consistent_large_series():\n # GH 29602\n result = Series([2] * 10001).rmod(-1)\n expected = Series([1] * 10001)\n\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\test_arithmetic.py | test_arithmetic.py | Python | 33,281 | 0.95 | 0.089598 | 0.100125 | awesome-app | 771 | 2023-10-02T01:00:36.985246 | Apache-2.0 | true | 1325e24cc47638e6d48a81c66ad06e7e |
from collections import OrderedDict\nfrom collections.abc import Iterator\nfrom datetime import (\n datetime,\n timedelta,\n)\n\nfrom dateutil.tz import tzoffset\nimport numpy as np\nfrom numpy import ma\nimport pytest\n\nfrom pandas._libs import (\n iNaT,\n lib,\n)\nfrom pandas.compat import HAS_PYARROW\nfrom pandas.compat.numpy import np_version_gt2\nfrom pandas.errors import IntCastingNaNError\nimport pandas.util._test_decorators as td\n\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n DataFrame,\n DatetimeIndex,\n DatetimeTZDtype,\n Index,\n Interval,\n IntervalIndex,\n MultiIndex,\n NaT,\n Period,\n RangeIndex,\n Series,\n Timestamp,\n date_range,\n isna,\n period_range,\n timedelta_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import (\n IntegerArray,\n IntervalArray,\n period_array,\n)\nfrom pandas.core.internals.blocks import NumpyBlock\n\n\nclass TestSeriesConstructors:\n def test_from_ints_with_non_nano_dt64_dtype(self, index_or_series):\n values = np.arange(10)\n\n res = index_or_series(values, dtype="M8[s]")\n expected = index_or_series(values.astype("M8[s]"))\n tm.assert_equal(res, expected)\n\n res = index_or_series(list(values), dtype="M8[s]")\n tm.assert_equal(res, expected)\n\n def test_from_na_value_and_interval_of_datetime_dtype(self):\n # GH#41805\n ser = Series([None], dtype="interval[datetime64[ns]]")\n assert ser.isna().all()\n assert ser.dtype == "interval[datetime64[ns], right]"\n\n def test_infer_with_date_and_datetime(self):\n # GH#49341 pre-2.0 we inferred datetime-and-date to datetime64, which\n # was inconsistent with Index behavior\n ts = Timestamp(2016, 1, 1)\n vals = [ts.to_pydatetime(), ts.date()]\n\n ser = Series(vals)\n expected = Series(vals, dtype=object)\n tm.assert_series_equal(ser, expected)\n\n idx = Index(vals)\n expected = Index(vals, dtype=object)\n tm.assert_index_equal(idx, expected)\n\n def 
test_unparsable_strings_with_dt64_dtype(self):\n # pre-2.0 these would be silently ignored and come back with object dtype\n vals = ["aa"]\n msg = "^Unknown datetime string format, unable to parse: aa, at position 0$"\n with pytest.raises(ValueError, match=msg):\n Series(vals, dtype="datetime64[ns]")\n\n with pytest.raises(ValueError, match=msg):\n Series(np.array(vals, dtype=object), dtype="datetime64[ns]")\n\n @pytest.mark.parametrize(\n "constructor",\n [\n # NOTE: some overlap with test_constructor_empty but that test does not\n # test for None or an empty generator.\n # test_constructor_pass_none tests None but only with the index also\n # passed.\n (lambda idx: Series(index=idx)),\n (lambda idx: Series(None, index=idx)),\n (lambda idx: Series({}, index=idx)),\n (lambda idx: Series((), index=idx)),\n (lambda idx: Series([], index=idx)),\n (lambda idx: Series((_ for _ in []), index=idx)),\n (lambda idx: Series(data=None, index=idx)),\n (lambda idx: Series(data={}, index=idx)),\n (lambda idx: Series(data=(), index=idx)),\n (lambda idx: Series(data=[], index=idx)),\n (lambda idx: Series(data=(_ for _ in []), index=idx)),\n ],\n )\n @pytest.mark.parametrize("empty_index", [None, []])\n def test_empty_constructor(self, constructor, empty_index):\n # GH 49573 (addition of empty_index parameter)\n expected = Series(index=empty_index)\n result = constructor(empty_index)\n\n assert result.dtype == object\n assert len(result.index) == 0\n tm.assert_series_equal(result, expected, check_index_type=True)\n\n def test_invalid_dtype(self):\n # GH15520\n msg = "not understood"\n invalid_list = [Timestamp, "Timestamp", list]\n for dtype in invalid_list:\n with pytest.raises(TypeError, match=msg):\n Series([], name="time", dtype=dtype)\n\n def test_invalid_compound_dtype(self):\n # GH#13296\n c_dtype = np.dtype([("a", "i8"), ("b", "f4")])\n cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)\n\n with pytest.raises(ValueError, match="Use DataFrame instead"):\n 
Series(cdt_arr, index=["A", "B"])\n\n def test_scalar_conversion(self):\n # Pass in scalar is disabled\n scalar = Series(0.5)\n assert not isinstance(scalar, float)\n\n def test_scalar_extension_dtype(self, ea_scalar_and_dtype):\n # GH 28401\n\n ea_scalar, ea_dtype = ea_scalar_and_dtype\n\n ser = Series(ea_scalar, index=range(3))\n expected = Series([ea_scalar] * 3, dtype=ea_dtype)\n\n assert ser.dtype == ea_dtype\n tm.assert_series_equal(ser, expected)\n\n def test_constructor(self, datetime_series, using_infer_string):\n empty_series = Series()\n assert datetime_series.index._is_all_dates\n\n # Pass in Series\n derived = Series(datetime_series)\n assert derived.index._is_all_dates\n\n tm.assert_index_equal(derived.index, datetime_series.index)\n # Ensure new index is not created\n assert id(datetime_series.index) == id(derived.index)\n\n # Mixed type Series\n mixed = Series(["hello", np.nan], index=[0, 1])\n assert mixed.dtype == np.object_ if not using_infer_string else "str"\n assert np.isnan(mixed[1])\n\n assert not empty_series.index._is_all_dates\n assert not Series().index._is_all_dates\n\n # exception raised is of type ValueError GH35744\n with pytest.raises(\n ValueError,\n match=r"Data must be 1-dimensional, got ndarray of shape \(3, 3\) instead",\n ):\n Series(np.random.default_rng(2).standard_normal((3, 3)), index=np.arange(3))\n\n mixed.name = "Series"\n rs = Series(mixed).name\n xp = "Series"\n assert rs == xp\n\n # raise on MultiIndex GH4187\n m = MultiIndex.from_arrays([[1, 2], [3, 4]])\n msg = "initializing a Series from a MultiIndex is not supported"\n with pytest.raises(NotImplementedError, match=msg):\n Series(m)\n\n def test_constructor_index_ndim_gt_1_raises(self):\n # GH#18579\n df = DataFrame([[1, 2], [3, 4], [5, 6]], index=[3, 6, 9])\n with pytest.raises(ValueError, match="Index data must be 1-dimensional"):\n Series([1, 3, 2], index=df)\n\n @pytest.mark.parametrize("input_class", [list, dict, OrderedDict])\n def 
test_constructor_empty(self, input_class, using_infer_string):\n empty = Series()\n empty2 = Series(input_class())\n\n # these are Index() and RangeIndex() which don't compare type equal\n # but are just .equals\n tm.assert_series_equal(empty, empty2, check_index_type=False)\n\n # With explicit dtype:\n empty = Series(dtype="float64")\n empty2 = Series(input_class(), dtype="float64")\n tm.assert_series_equal(empty, empty2, check_index_type=False)\n\n # GH 18515 : with dtype=category:\n empty = Series(dtype="category")\n empty2 = Series(input_class(), dtype="category")\n tm.assert_series_equal(empty, empty2, check_index_type=False)\n\n if input_class is not list:\n # With index:\n empty = Series(index=range(10))\n empty2 = Series(input_class(), index=range(10))\n tm.assert_series_equal(empty, empty2)\n\n # With index and dtype float64:\n empty = Series(np.nan, index=range(10))\n empty2 = Series(input_class(), index=range(10), dtype="float64")\n tm.assert_series_equal(empty, empty2)\n\n # GH 19853 : with empty string, index and dtype str\n empty = Series("", dtype=str, index=range(3))\n if using_infer_string:\n empty2 = Series("", index=range(3), dtype="str")\n else:\n empty2 = Series("", index=range(3))\n tm.assert_series_equal(empty, empty2)\n\n @pytest.mark.parametrize("input_arg", [np.nan, float("nan")])\n def test_constructor_nan(self, input_arg):\n empty = Series(dtype="float64", index=range(10))\n empty2 = Series(input_arg, index=range(10))\n\n tm.assert_series_equal(empty, empty2, check_index_type=False)\n\n @pytest.mark.parametrize(\n "dtype",\n ["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],\n )\n @pytest.mark.parametrize("index", [None, Index([])])\n def test_constructor_dtype_only(self, dtype, index):\n # GH-20865\n result = Series(dtype=dtype, index=index)\n assert result.dtype == dtype\n assert len(result) == 0\n\n def test_constructor_no_data_index_order(self):\n result = Series(index=["b", "a", "c"])\n assert 
result.index.tolist() == ["b", "a", "c"]\n\n def test_constructor_no_data_string_type(self):\n # GH 22477\n result = Series(index=[1], dtype=str)\n assert np.isnan(result.iloc[0])\n\n @pytest.mark.parametrize("item", ["entry", "ѐ", 13])\n def test_constructor_string_element_string_type(self, item):\n # GH 22477\n result = Series(item, index=[1], dtype=str)\n assert result.iloc[0] == str(item)\n\n def test_constructor_dtype_str_na_values(self, string_dtype):\n # https://github.com/pandas-dev/pandas/issues/21083\n ser = Series(["x", None], dtype=string_dtype)\n result = ser.isna()\n expected = Series([False, True])\n tm.assert_series_equal(result, expected)\n assert ser.iloc[1] is None\n\n ser = Series(["x", np.nan], dtype=string_dtype)\n assert np.isnan(ser.iloc[1])\n\n def test_constructor_series(self):\n index1 = ["d", "b", "a", "c"]\n index2 = sorted(index1)\n s1 = Series([4, 7, -5, 3], index=index1)\n s2 = Series(s1, index=index2)\n\n tm.assert_series_equal(s2, s1.sort_index())\n\n def test_constructor_iterable(self):\n # GH 21987\n class Iter:\n def __iter__(self) -> Iterator:\n yield from range(10)\n\n expected = Series(list(range(10)), dtype="int64")\n result = Series(Iter(), dtype="int64")\n tm.assert_series_equal(result, expected)\n\n def test_constructor_sequence(self):\n # GH 21987\n expected = Series(list(range(10)), dtype="int64")\n result = Series(range(10), dtype="int64")\n tm.assert_series_equal(result, expected)\n\n def test_constructor_single_str(self):\n # GH 21987\n expected = Series(["abc"])\n result = Series("abc")\n tm.assert_series_equal(result, expected)\n\n def test_constructor_list_like(self):\n # make sure that we are coercing different\n # list-likes to standard dtypes and not\n # platform specific\n expected = Series([1, 2, 3], dtype="int64")\n for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:\n result = Series(obj, index=[0, 1, 2])\n tm.assert_series_equal(result, expected)\n\n def 
test_constructor_boolean_index(self):\n # GH#18579\n s1 = Series([1, 2, 3], index=[4, 5, 6])\n\n index = s1 == 2\n result = Series([1, 3, 2], index=index)\n expected = Series([1, 3, 2], index=[False, True, False])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])\n def test_constructor_index_dtype(self, dtype):\n # GH 17088\n\n s = Series(Index([0, 2, 4]), dtype=dtype)\n assert s.dtype == dtype\n\n @pytest.mark.parametrize(\n "input_vals",\n [\n ([1, 2]),\n (["1", "2"]),\n (list(date_range("1/1/2011", periods=2, freq="h"))),\n (list(date_range("1/1/2011", periods=2, freq="h", tz="US/Eastern"))),\n ([Interval(left=0, right=5)]),\n ],\n )\n def test_constructor_list_str(self, input_vals, string_dtype):\n # GH 16605\n # Ensure that data elements from a list are converted to strings\n # when dtype is str, 'str', or 'U'\n result = Series(input_vals, dtype=string_dtype)\n expected = Series(input_vals).astype(string_dtype)\n tm.assert_series_equal(result, expected)\n\n def test_constructor_list_str_na(self, string_dtype):\n result = Series([1.0, 2.0, np.nan], dtype=string_dtype)\n expected = Series(["1.0", "2.0", np.nan], dtype=object)\n tm.assert_series_equal(result, expected)\n assert np.isnan(result[2])\n\n def test_constructor_generator(self):\n gen = (i for i in range(10))\n\n result = Series(gen)\n exp = Series(range(10))\n tm.assert_series_equal(result, exp)\n\n # same but with non-default index\n gen = (i for i in range(10))\n result = Series(gen, index=range(10, 20))\n exp.index = range(10, 20)\n tm.assert_series_equal(result, exp)\n\n def test_constructor_map(self):\n # GH8909\n m = (x for x in range(10))\n\n result = Series(m)\n exp = Series(range(10))\n tm.assert_series_equal(result, exp)\n\n # same but with non-default index\n m = (x for x in range(10))\n result = Series(m, index=range(10, 20))\n exp.index = range(10, 20)\n tm.assert_series_equal(result, exp)\n\n def 
test_constructor_categorical(self):\n cat = Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"])\n res = Series(cat)\n tm.assert_categorical_equal(res.values, cat)\n\n # can cast to a new dtype\n result = Series(Categorical([1, 2, 3]), dtype="int64")\n expected = Series([1, 2, 3], dtype="int64")\n tm.assert_series_equal(result, expected)\n\n def test_construct_from_categorical_with_dtype(self):\n # GH12574\n ser = Series(Categorical([1, 2, 3]), dtype="category")\n assert isinstance(ser.dtype, CategoricalDtype)\n\n def test_construct_intlist_values_category_dtype(self):\n ser = Series([1, 2, 3], dtype="category")\n assert isinstance(ser.dtype, CategoricalDtype)\n\n def test_constructor_categorical_with_coercion(self):\n factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])\n # test basic creation / coercion of categoricals\n s = Series(factor, name="A")\n assert s.dtype == "category"\n assert len(s) == len(factor)\n\n # in a frame\n df = DataFrame({"A": factor})\n result = df["A"]\n tm.assert_series_equal(result, s)\n result = df.iloc[:, 0]\n tm.assert_series_equal(result, s)\n assert len(df) == len(factor)\n\n df = DataFrame({"A": s})\n result = df["A"]\n tm.assert_series_equal(result, s)\n assert len(df) == len(factor)\n\n # multiples\n df = DataFrame({"A": s, "B": s, "C": 1})\n result1 = df["A"]\n result2 = df["B"]\n tm.assert_series_equal(result1, s)\n tm.assert_series_equal(result2, s, check_names=False)\n assert result2.name == "B"\n assert len(df) == len(factor)\n\n def test_constructor_categorical_with_coercion2(self):\n # GH8623\n x = DataFrame(\n [[1, "John P. Doe"], [2, "Jane Dove"], [1, "John P. 
Doe"]],\n columns=["person_id", "person_name"],\n )\n x["person_name"] = Categorical(x.person_name) # doing this breaks transform\n\n expected = x.iloc[0].person_name\n result = x.person_name.iloc[0]\n assert result == expected\n\n result = x.person_name[0]\n assert result == expected\n\n result = x.person_name.loc[0]\n assert result == expected\n\n def test_constructor_series_to_categorical(self):\n # see GH#16524: test conversion of Series to Categorical\n series = Series(["a", "b", "c"])\n\n result = Series(series, dtype="category")\n expected = Series(["a", "b", "c"], dtype="category")\n\n tm.assert_series_equal(result, expected)\n\n def test_constructor_categorical_dtype(self):\n result = Series(\n ["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)\n )\n assert isinstance(result.dtype, CategoricalDtype)\n tm.assert_index_equal(result.cat.categories, Index(["a", "b", "c"]))\n assert result.cat.ordered\n\n result = Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))\n assert isinstance(result.dtype, CategoricalDtype)\n tm.assert_index_equal(result.cat.categories, Index(["b", "a"]))\n assert result.cat.ordered is False\n\n # GH 19565 - Check broadcasting of scalar with Categorical dtype\n result = Series(\n "a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)\n )\n expected = Series(\n ["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)\n )\n tm.assert_series_equal(result, expected)\n\n def test_constructor_categorical_string(self):\n # GH 26336: the string 'category' maintains existing CategoricalDtype\n cdt = CategoricalDtype(categories=list("dabc"), ordered=True)\n expected = Series(list("abcabc"), dtype=cdt)\n\n # Series(Categorical, dtype='category') keeps existing dtype\n cat = Categorical(list("abcabc"), dtype=cdt)\n result = Series(cat, dtype="category")\n tm.assert_series_equal(result, expected)\n\n # Series(Series[Categorical], dtype='category') keeps existing dtype\n result = Series(result, 
dtype="category")\n tm.assert_series_equal(result, expected)\n\n def test_categorical_sideeffects_free(self):\n # Passing a categorical to a Series and then changing values in either\n # the series or the categorical should not change the values in the\n # other one, IF you specify copy!\n cat = Categorical(["a", "b", "c", "a"])\n s = Series(cat, copy=True)\n assert s.cat is not cat\n s = s.cat.rename_categories([1, 2, 3])\n exp_s = np.array([1, 2, 3, 1], dtype=np.int64)\n exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)\n tm.assert_numpy_array_equal(s.__array__(), exp_s)\n tm.assert_numpy_array_equal(cat.__array__(), exp_cat)\n\n # setting\n s[0] = 2\n exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)\n tm.assert_numpy_array_equal(s.__array__(), exp_s2)\n tm.assert_numpy_array_equal(cat.__array__(), exp_cat)\n\n # however, copy is False by default\n # so this WILL change values\n cat = Categorical(["a", "b", "c", "a"])\n s = Series(cat, copy=False)\n assert s.values is cat\n s = s.cat.rename_categories([1, 2, 3])\n assert s.values is not cat\n exp_s = np.array([1, 2, 3, 1], dtype=np.int64)\n tm.assert_numpy_array_equal(s.__array__(), exp_s)\n\n s[0] = 2\n exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)\n tm.assert_numpy_array_equal(s.__array__(), exp_s2)\n\n def test_unordered_compare_equal(self):\n left = Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))\n right = Series(Categorical(["a", "b", np.nan], categories=["a", "b"]))\n tm.assert_series_equal(left, right)\n\n def test_constructor_maskedarray(self):\n data = ma.masked_all((3,), dtype=float)\n result = Series(data)\n expected = Series([np.nan, np.nan, np.nan])\n tm.assert_series_equal(result, expected)\n\n data[0] = 0.0\n data[2] = 2.0\n index = ["a", "b", "c"]\n result = Series(data, index=index)\n expected = Series([0.0, np.nan, 2.0], index=index)\n tm.assert_series_equal(result, expected)\n\n data[1] = 1.0\n result = Series(data, index=index)\n expected = Series([0.0, 1.0, 2.0], 
index=index)\n tm.assert_series_equal(result, expected)\n\n data = ma.masked_all((3,), dtype=int)\n result = Series(data)\n expected = Series([np.nan, np.nan, np.nan], dtype=float)\n tm.assert_series_equal(result, expected)\n\n data[0] = 0\n data[2] = 2\n index = ["a", "b", "c"]\n result = Series(data, index=index)\n expected = Series([0, np.nan, 2], index=index, dtype=float)\n tm.assert_series_equal(result, expected)\n\n data[1] = 1\n result = Series(data, index=index)\n expected = Series([0, 1, 2], index=index, dtype=int)\n with pytest.raises(AssertionError, match="Series classes are different"):\n # TODO should this be raising at all?\n # https://github.com/pandas-dev/pandas/issues/56131\n tm.assert_series_equal(result, expected)\n\n data = ma.masked_all((3,), dtype=bool)\n result = Series(data)\n expected = Series([np.nan, np.nan, np.nan], dtype=object)\n tm.assert_series_equal(result, expected)\n\n data[0] = True\n data[2] = False\n index = ["a", "b", "c"]\n result = Series(data, index=index)\n expected = Series([True, np.nan, False], index=index, dtype=object)\n tm.assert_series_equal(result, expected)\n\n data[1] = True\n result = Series(data, index=index)\n expected = Series([True, True, False], index=index, dtype=bool)\n with pytest.raises(AssertionError, match="Series classes are different"):\n # TODO should this be raising at all?\n # https://github.com/pandas-dev/pandas/issues/56131\n tm.assert_series_equal(result, expected)\n\n data = ma.masked_all((3,), dtype="M8[ns]")\n result = Series(data)\n expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")\n tm.assert_series_equal(result, expected)\n\n data[0] = datetime(2001, 1, 1)\n data[2] = datetime(2001, 1, 3)\n index = ["a", "b", "c"]\n result = Series(data, index=index)\n expected = Series(\n [datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],\n index=index,\n dtype="M8[ns]",\n )\n tm.assert_series_equal(result, expected)\n\n data[1] = datetime(2001, 1, 2)\n result = Series(data, index=index)\n expected 
= Series(\n [datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],\n index=index,\n dtype="M8[ns]",\n )\n tm.assert_series_equal(result, expected)\n\n def test_constructor_maskedarray_hardened(self):\n # Check numpy masked arrays with hard masks -- from GH24574\n data = ma.masked_all((3,), dtype=float).harden_mask()\n result = Series(data)\n expected = Series([np.nan, np.nan, np.nan])\n tm.assert_series_equal(result, expected)\n\n def test_series_ctor_plus_datetimeindex(self, using_copy_on_write):\n rng = date_range("20090415", "20090519", freq="B")\n data = {k: 1 for k in rng}\n\n result = Series(data, index=rng)\n if using_copy_on_write:\n assert result.index.is_(rng)\n else:\n assert result.index is rng\n\n def test_constructor_default_index(self):\n s = Series([0, 1, 2])\n tm.assert_index_equal(s.index, Index(range(3)), exact=True)\n\n @pytest.mark.parametrize(\n "input",\n [\n [1, 2, 3],\n (1, 2, 3),\n list(range(3)),\n Categorical(["a", "b", "a"]),\n (i for i in range(3)),\n (x for x in range(3)),\n ],\n )\n def test_constructor_index_mismatch(self, input):\n # GH 19342\n # test that construction of a Series with an index of different length\n # raises an error\n msg = r"Length of values \(3\) does not match length of index \(4\)"\n with pytest.raises(ValueError, match=msg):\n Series(input, index=np.arange(4))\n\n def test_constructor_numpy_scalar(self):\n # GH 19342\n # construction with a numpy scalar\n # should not raise\n result = Series(np.array(100), index=np.arange(4), dtype="int64")\n expected = Series(100, index=np.arange(4), dtype="int64")\n tm.assert_series_equal(result, expected)\n\n def test_constructor_broadcast_list(self):\n # GH 19342\n # construction with single-element container and index\n # should raise\n msg = r"Length of values \(1\) does not match length of index \(3\)"\n with pytest.raises(ValueError, match=msg):\n Series(["foo"], index=["a", "b", "c"])\n\n def test_constructor_corner(self):\n df = DataFrame(range(5), 
index=date_range("2020-01-01", periods=5))\n objs = [df, df]\n s = Series(objs, index=[0, 1])\n assert isinstance(s, Series)\n\n def test_constructor_sanitize(self):\n s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")\n assert s.dtype == np.dtype("i8")\n\n msg = r"Cannot convert non-finite values \(NA or inf\) to integer"\n with pytest.raises(IntCastingNaNError, match=msg):\n Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")\n\n def test_constructor_copy(self):\n # GH15125\n # test dtype parameter has no side effects on copy=True\n for data in [[1.0], np.array([1.0])]:\n x = Series(data)\n y = Series(x, copy=True, dtype=float)\n\n # copy=True maintains original data in Series\n tm.assert_series_equal(x, y)\n\n # changes to origin of copy does not affect the copy\n x[0] = 2.0\n assert not x.equals(y)\n assert x[0] == 2.0\n assert y[0] == 1.0\n\n @td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite test\n @pytest.mark.parametrize(\n "index",\n [\n date_range("20170101", periods=3, tz="US/Eastern"),\n date_range("20170101", periods=3),\n timedelta_range("1 day", periods=3),\n period_range("2012Q1", periods=3, freq="Q"),\n Index(list("abc")),\n Index([1, 2, 3]),\n RangeIndex(0, 3),\n ],\n ids=lambda x: type(x).__name__,\n )\n def test_constructor_limit_copies(self, index):\n # GH 17449\n # limit copies of input\n s = Series(index)\n\n # we make 1 copy; this is just a smoke test here\n assert s._mgr.blocks[0].values is not index\n\n def test_constructor_shallow_copy(self):\n # constructing a Series from Series with copy=False should still\n # give a "shallow" copy (share data, not attributes)\n # https://github.com/pandas-dev/pandas/issues/49523\n s = Series([1, 2, 3])\n s_orig = s.copy()\n s2 = Series(s)\n assert s2._mgr is not s._mgr\n # Overwriting index of s2 doesn't change s\n s2.index = ["a", "b", "c"]\n tm.assert_series_equal(s, s_orig)\n\n def test_constructor_pass_none(self):\n s = Series(None, index=range(5))\n assert s.dtype == 
np.float64\n\n s = Series(None, index=range(5), dtype=object)\n assert s.dtype == np.object_\n\n # GH 7431\n # inference on the index\n s = Series(index=np.array([None]))\n expected = Series(index=Index([None]))\n tm.assert_series_equal(s, expected)\n\n def test_constructor_pass_nan_nat(self):\n # GH 13467\n exp = Series([np.nan, np.nan], dtype=np.float64)\n assert exp.dtype == np.float64\n tm.assert_series_equal(Series([np.nan, np.nan]), exp)\n tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)\n\n exp = Series([NaT, NaT])\n assert exp.dtype == "datetime64[ns]"\n tm.assert_series_equal(Series([NaT, NaT]), exp)\n tm.assert_series_equal(Series(np.array([NaT, NaT])), exp)\n\n tm.assert_series_equal(Series([NaT, np.nan]), exp)\n tm.assert_series_equal(Series(np.array([NaT, np.nan])), exp)\n\n tm.assert_series_equal(Series([np.nan, NaT]), exp)\n tm.assert_series_equal(Series(np.array([np.nan, NaT])), exp)\n\n def test_constructor_cast(self):\n msg = "could not convert string to float"\n with pytest.raises(ValueError, match=msg):\n Series(["a", "b", "c"], dtype=float)\n\n def test_constructor_signed_int_overflow_raises(self):\n # GH#41734 disallow silent overflow, enforced in 2.0\n if np_version_gt2:\n msg = "The elements provided in the data cannot all be casted to the dtype"\n err = OverflowError\n else:\n msg = "Values are too large to be losslessly converted"\n err = ValueError\n with pytest.raises(err, match=msg):\n Series([1, 200, 923442], dtype="int8")\n\n with pytest.raises(err, match=msg):\n Series([1, 200, 923442], dtype="uint8")\n\n @pytest.mark.parametrize(\n "values",\n [\n np.array([1], dtype=np.uint16),\n np.array([1], dtype=np.uint32),\n np.array([1], dtype=np.uint64),\n [np.uint16(1)],\n [np.uint32(1)],\n [np.uint64(1)],\n ],\n )\n def test_constructor_numpy_uints(self, values):\n # GH#47294\n value = values[0]\n result = Series(values)\n\n assert result[0].dtype == value.dtype\n assert result[0] == value\n\n def 
test_constructor_unsigned_dtype_overflow(self, any_unsigned_int_numpy_dtype):\n # see gh-15832\n if np_version_gt2:\n msg = (\n f"The elements provided in the data cannot "\n f"all be casted to the dtype {any_unsigned_int_numpy_dtype}"\n )\n else:\n msg = "Trying to coerce negative values to unsigned integers"\n with pytest.raises(OverflowError, match=msg):\n Series([-1], dtype=any_unsigned_int_numpy_dtype)\n\n def test_constructor_floating_data_int_dtype(self, frame_or_series):\n # GH#40110\n arr = np.random.default_rng(2).standard_normal(2)\n\n # Long-standing behavior (for Series, new in 2.0 for DataFrame)\n # has been to ignore the dtype on these;\n # not clear if this is what we want long-term\n # expected = frame_or_series(arr)\n\n # GH#49599 as of 2.0 we raise instead of silently retaining float dtype\n msg = "Trying to coerce float values to integer"\n with pytest.raises(ValueError, match=msg):\n frame_or_series(arr, dtype="i8")\n\n with pytest.raises(ValueError, match=msg):\n frame_or_series(list(arr), dtype="i8")\n\n # pre-2.0, when we had NaNs, we silently ignored the integer dtype\n arr[0] = np.nan\n # expected = frame_or_series(arr)\n\n msg = r"Cannot convert non-finite values \(NA or inf\) to integer"\n with pytest.raises(IntCastingNaNError, match=msg):\n frame_or_series(arr, dtype="i8")\n\n exc = IntCastingNaNError\n if frame_or_series is Series:\n # TODO: try to align these\n exc = ValueError\n msg = "cannot convert float NaN to integer"\n with pytest.raises(exc, match=msg):\n # same behavior if we pass list instead of the ndarray\n frame_or_series(list(arr), dtype="i8")\n\n # float array that can be losslessly cast to integers\n arr = np.array([1.0, 2.0], dtype="float64")\n expected = frame_or_series(arr.astype("i8"))\n\n obj = frame_or_series(arr, dtype="i8")\n tm.assert_equal(obj, expected)\n\n obj = frame_or_series(list(arr), dtype="i8")\n tm.assert_equal(obj, expected)\n\n def test_constructor_coerce_float_fail(self, any_int_numpy_dtype):\n # 
see gh-15832\n # Updated: make sure we treat this list the same as we would treat\n # the equivalent ndarray\n # GH#49599 pre-2.0 we silently retained float dtype, in 2.0 we raise\n vals = [1, 2, 3.5]\n\n msg = "Trying to coerce float values to integer"\n with pytest.raises(ValueError, match=msg):\n Series(vals, dtype=any_int_numpy_dtype)\n with pytest.raises(ValueError, match=msg):\n Series(np.array(vals), dtype=any_int_numpy_dtype)\n\n def test_constructor_coerce_float_valid(self, float_numpy_dtype):\n s = Series([1, 2, 3.5], dtype=float_numpy_dtype)\n expected = Series([1, 2, 3.5]).astype(float_numpy_dtype)\n tm.assert_series_equal(s, expected)\n\n def test_constructor_invalid_coerce_ints_with_float_nan(self, any_int_numpy_dtype):\n # GH 22585\n # Updated: make sure we treat this list the same as we would treat the\n # equivalent ndarray\n vals = [1, 2, np.nan]\n # pre-2.0 this would return with a float dtype, in 2.0 we raise\n\n msg = "cannot convert float NaN to integer"\n with pytest.raises(ValueError, match=msg):\n Series(vals, dtype=any_int_numpy_dtype)\n msg = r"Cannot convert non-finite values \(NA or inf\) to integer"\n with pytest.raises(IntCastingNaNError, match=msg):\n Series(np.array(vals), dtype=any_int_numpy_dtype)\n\n def test_constructor_dtype_no_cast(self, using_copy_on_write, warn_copy_on_write):\n # see gh-1572\n s = Series([1, 2, 3])\n s2 = Series(s, dtype=np.int64)\n\n warn = FutureWarning if warn_copy_on_write else None\n with tm.assert_produces_warning(warn):\n s2[1] = 5\n if using_copy_on_write:\n assert s[1] == 2\n else:\n assert s[1] == 5\n\n def test_constructor_datelike_coercion(self):\n # GH 9477\n # incorrectly inferring on dateimelike looking when object dtype is\n # specified\n s = Series([Timestamp("20130101"), "NOV"], dtype=object)\n assert s.iloc[0] == Timestamp("20130101")\n assert s.iloc[1] == "NOV"\n assert s.dtype == object\n\n def test_constructor_datelike_coercion2(self):\n # the dtype was being reset on the slicing and 
re-inferred to datetime\n # even thought the blocks are mixed\n belly = "216 3T19".split()\n wing1 = "2T15 4H19".split()\n wing2 = "416 4T20".split()\n mat = pd.to_datetime("2016-01-22 2019-09-07".split())\n df = DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly)\n\n result = df.loc["3T19"]\n assert result.dtype == object\n result = df.loc["216"]\n assert result.dtype == object\n\n def test_constructor_mixed_int_and_timestamp(self, frame_or_series):\n # specifically Timestamp with nanos, not datetimes\n objs = [Timestamp(9), 10, NaT._value]\n result = frame_or_series(objs, dtype="M8[ns]")\n\n expected = frame_or_series([Timestamp(9), Timestamp(10), NaT])\n tm.assert_equal(result, expected)\n\n def test_constructor_datetimes_with_nulls(self):\n # gh-15869\n for arr in [\n np.array([None, None, None, None, datetime.now(), None]),\n np.array([None, None, datetime.now(), None]),\n ]:\n result = Series(arr)\n assert result.dtype == "M8[ns]"\n\n def test_constructor_dtype_datetime64(self):\n s = Series(iNaT, dtype="M8[ns]", index=range(5))\n assert isna(s).all()\n\n # in theory this should be all nulls, but since\n # we are not specifying a dtype is ambiguous\n s = Series(iNaT, index=range(5))\n assert not isna(s).all()\n\n s = Series(np.nan, dtype="M8[ns]", index=range(5))\n assert isna(s).all()\n\n s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype="M8[ns]")\n assert isna(s[1])\n assert s.dtype == "M8[ns]"\n\n s = Series([datetime(2001, 1, 2, 0, 0), np.nan], dtype="M8[ns]")\n assert isna(s[1])\n assert s.dtype == "M8[ns]"\n\n def test_constructor_dtype_datetime64_10(self):\n # GH3416\n pydates = [datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)]\n dates = [np.datetime64(x) for x in pydates]\n\n ser = Series(dates)\n assert ser.dtype == "M8[ns]"\n\n ser.iloc[0] = np.nan\n assert ser.dtype == "M8[ns]"\n\n # GH3414 related\n expected = Series(pydates, dtype="datetime64[ms]")\n\n result = Series(Series(dates).astype(np.int64) / 1000000, 
dtype="M8[ms]")\n tm.assert_series_equal(result, expected)\n\n result = Series(dates, dtype="datetime64[ms]")\n tm.assert_series_equal(result, expected)\n\n expected = Series(\n [NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]"\n )\n result = Series([np.nan] + dates[1:], dtype="datetime64[ns]")\n tm.assert_series_equal(result, expected)\n\n def test_constructor_dtype_datetime64_11(self):\n pydates = [datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)]\n dates = [np.datetime64(x) for x in pydates]\n\n dts = Series(dates, dtype="datetime64[ns]")\n\n # valid astype\n dts.astype("int64")\n\n # invalid casting\n msg = r"Converting from datetime64\[ns\] to int32 is not supported"\n with pytest.raises(TypeError, match=msg):\n dts.astype("int32")\n\n # ints are ok\n # we test with np.int64 to get similar results on\n # windows / 32-bit platforms\n result = Series(dts, dtype=np.int64)\n expected = Series(dts.astype(np.int64))\n tm.assert_series_equal(result, expected)\n\n def test_constructor_dtype_datetime64_9(self):\n # invalid dates can be help as object\n result = Series([datetime(2, 1, 1)])\n assert result[0] == datetime(2, 1, 1, 0, 0)\n\n result = Series([datetime(3000, 1, 1)])\n assert result[0] == datetime(3000, 1, 1, 0, 0)\n\n def test_constructor_dtype_datetime64_8(self):\n # don't mix types\n result = Series([Timestamp("20130101"), 1], index=["a", "b"])\n assert result["a"] == Timestamp("20130101")\n assert result["b"] == 1\n\n def test_constructor_dtype_datetime64_7(self):\n # GH6529\n # coerce datetime64 non-ns properly\n dates = date_range("01-Jan-2015", "01-Dec-2015", freq="ME")\n values2 = dates.view(np.ndarray).astype("datetime64[ns]")\n expected = Series(values2, index=dates)\n\n for unit in ["s", "D", "ms", "us", "ns"]:\n dtype = np.dtype(f"M8[{unit}]")\n values1 = dates.view(np.ndarray).astype(dtype)\n result = Series(values1, dates)\n if unit == "D":\n # for unit="D" we cast to nearest-supported reso, i.e. 
"s"\n dtype = np.dtype("M8[s]")\n assert result.dtype == dtype\n tm.assert_series_equal(result, expected.astype(dtype))\n\n # GH 13876\n # coerce to non-ns to object properly\n expected = Series(values2, index=dates, dtype=object)\n for dtype in ["s", "D", "ms", "us", "ns"]:\n values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")\n result = Series(values1, index=dates, dtype=object)\n tm.assert_series_equal(result, expected)\n\n # leave datetime.date alone\n dates2 = np.array([d.date() for d in dates.to_pydatetime()], dtype=object)\n series1 = Series(dates2, dates)\n tm.assert_numpy_array_equal(series1.values, dates2)\n assert series1.dtype == object\n\n def test_constructor_dtype_datetime64_6(self):\n # as of 2.0, these no longer infer datetime64 based on the strings,\n # matching the Index behavior\n\n ser = Series([None, NaT, "2013-08-05 15:30:00.000001"])\n assert ser.dtype == object\n\n ser = Series([np.nan, NaT, "2013-08-05 15:30:00.000001"])\n assert ser.dtype == object\n\n ser = Series([NaT, None, "2013-08-05 15:30:00.000001"])\n assert ser.dtype == object\n\n ser = Series([NaT, np.nan, "2013-08-05 15:30:00.000001"])\n assert ser.dtype == object\n\n def test_constructor_dtype_datetime64_5(self):\n # tz-aware (UTC and other tz's)\n # GH 8411\n dr = date_range("20130101", periods=3)\n assert Series(dr).iloc[0].tz is None\n dr = date_range("20130101", periods=3, tz="UTC")\n assert str(Series(dr).iloc[0].tz) == "UTC"\n dr = date_range("20130101", periods=3, tz="US/Eastern")\n assert str(Series(dr).iloc[0].tz) == "US/Eastern"\n\n def test_constructor_dtype_datetime64_4(self):\n # non-convertible\n ser = Series([1479596223000, -1479590, NaT])\n assert ser.dtype == "object"\n assert ser[2] is NaT\n assert "NaT" in str(ser)\n\n def test_constructor_dtype_datetime64_3(self):\n # if we passed a NaT it remains\n ser = Series([datetime(2010, 1, 1), datetime(2, 1, 1), NaT])\n assert ser.dtype == "object"\n assert ser[2] is NaT\n assert "NaT" in str(ser)\n\n def 
test_constructor_dtype_datetime64_2(self):
        # if we passed a nan it remains NaN (list stays object dtype, no coercion)
        ser = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
        assert ser.dtype == "object"
        assert ser[2] is np.nan
        assert "NaN" in str(ser)

    def test_constructor_with_datetime_tz(self):
        # GH#8260
        # support datetime64 with tz

        dr = date_range("20130101", periods=3, tz="US/Eastern")
        s = Series(dr)
        assert s.dtype.name == "datetime64[ns, US/Eastern]"
        assert s.dtype == "datetime64[ns, US/Eastern]"
        assert isinstance(s.dtype, DatetimeTZDtype)
        assert "datetime64[ns, US/Eastern]" in str(s)

        # export: .values drops the tz and yields naive (UTC-based) datetime64[ns]
        result = s.values
        assert isinstance(result, np.ndarray)
        assert result.dtype == "datetime64[ns]"

        # round-trip: re-localize the naive values from UTC to recover the index
        exp = DatetimeIndex(result)
        exp = exp.tz_localize("UTC").tz_convert(tz=s.dt.tz)
        tm.assert_index_equal(dr, exp)

        # indexing: scalar access returns a tz-aware Timestamp
        result = s.iloc[0]
        assert result == Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern")
        result = s[0]
        assert result == Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern")

        result = s[Series([True, True, False], index=s.index)]
        tm.assert_series_equal(result, s[0:2])

        result = s.iloc[0:1]
        tm.assert_series_equal(result, Series(dr[0:1]))

        # concat preserves the tz-aware dtype
        result = pd.concat([s.iloc[0:1], s.iloc[1:]])
        tm.assert_series_equal(result, s)

        # short str
        assert "datetime64[ns, US/Eastern]" in str(s)

        # formatting with NaT (shift introduces one)
        result = s.shift()
        assert "datetime64[ns, US/Eastern]" in str(result)
        assert "NaT" in str(result)

        result = DatetimeIndex(s, freq="infer")
        tm.assert_index_equal(result, dr)

    def test_constructor_with_datetime_tz5(self):
        # long str: repr of a large tz-aware Series still shows the dtype
        ser = Series(date_range("20130101", periods=1000, tz="US/Eastern"))
        assert "datetime64[ns, US/Eastern]" in str(ser)

    def test_constructor_with_datetime_tz4(self):
        # inference: homogeneous tz -> tz-aware dtype is inferred
        ser = Series(
            [
                Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
                Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
            ]
        )
        assert ser.dtype == "datetime64[ns, 
US/Pacific]"\n assert lib.infer_dtype(ser, skipna=True) == "datetime64"\n\n def test_constructor_with_datetime_tz3(self):\n ser = Series(\n [\n Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),\n Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"),\n ]\n )\n assert ser.dtype == "object"\n assert lib.infer_dtype(ser, skipna=True) == "datetime"\n\n def test_constructor_with_datetime_tz2(self):\n # with all NaT\n ser = Series(NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")\n dti = DatetimeIndex(["NaT", "NaT"], tz="US/Eastern").as_unit("ns")\n expected = Series(dti)\n tm.assert_series_equal(ser, expected)\n\n def test_constructor_no_partial_datetime_casting(self):\n # GH#40111\n vals = [\n "nan",\n Timestamp("1990-01-01"),\n "2015-03-14T16:15:14.123-08:00",\n "2019-03-04T21:56:32.620-07:00",\n None,\n ]\n ser = Series(vals)\n assert all(ser[i] is vals[i] for i in range(len(vals)))\n\n @pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])\n @pytest.mark.parametrize("kind", ["M", "m"])\n @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])\n def test_construction_to_datetimelike_unit(self, arr_dtype, kind, unit):\n # tests all units\n # gh-19223\n # TODO: GH#19223 was about .astype, doesn't belong here\n dtype = f"{kind}8[{unit}]"\n arr = np.array([1, 2, 3], dtype=arr_dtype)\n ser = Series(arr)\n result = ser.astype(dtype)\n\n expected = Series(arr.astype(dtype))\n\n if unit in ["ns", "us", "ms", "s"]:\n assert result.dtype == dtype\n assert expected.dtype == dtype\n else:\n # Otherwise we cast to nearest-supported unit, i.e. 
seconds\n assert result.dtype == f"{kind}8[s]"\n assert expected.dtype == f"{kind}8[s]"\n\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", NaT, np.nan, None])\n def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg):\n # GH 17415: With naive string\n result = Series([arg], dtype="datetime64[ns, CET]")\n expected = Series(Timestamp(arg)).dt.tz_localize("CET")\n tm.assert_series_equal(result, expected)\n\n def test_constructor_datetime64_bigendian(self):\n # GH#30976\n ms = np.datetime64(1, "ms")\n arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")\n\n result = Series(arr)\n expected = Series([Timestamp(ms)]).astype("M8[ms]")\n assert expected.dtype == "M8[ms]"\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray])\n def test_construction_interval(self, interval_constructor):\n # construction from interval & array of intervals\n intervals = interval_constructor.from_breaks(np.arange(3), closed="right")\n result = Series(intervals)\n assert result.dtype == "interval[int64, right]"\n tm.assert_index_equal(Index(result.values), Index(intervals))\n\n @pytest.mark.parametrize(\n "data_constructor", [list, np.array], ids=["list", "ndarray[object]"]\n )\n def test_constructor_infer_interval(self, data_constructor):\n # GH 23563: consistent closed results in interval dtype\n data = [Interval(0, 1), Interval(0, 2), None]\n result = Series(data_constructor(data))\n expected = Series(IntervalArray(data))\n assert result.dtype == "interval[float64, right]"\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "data_constructor", [list, np.array], ids=["list", "ndarray[object]"]\n )\n def test_constructor_interval_mixed_closed(self, data_constructor):\n # GH 23563: mixed closed results in object dtype (not interval dtype)\n data = [Interval(0, 1, closed="both"), Interval(0, 2, closed="neither")]\n result = 
Series(data_constructor(data))\n assert result.dtype == object\n assert result.tolist() == data\n\n def test_construction_consistency(self):\n # make sure that we are not re-localizing upon construction\n # GH 14928\n ser = Series(date_range("20130101", periods=3, tz="US/Eastern"))\n\n result = Series(ser, dtype=ser.dtype)\n tm.assert_series_equal(result, ser)\n\n result = Series(ser.dt.tz_convert("UTC"), dtype=ser.dtype)\n tm.assert_series_equal(result, ser)\n\n # Pre-2.0 dt64 values were treated as utc, which was inconsistent\n # with DatetimeIndex, which treats them as wall times, see GH#33401\n result = Series(ser.values, dtype=ser.dtype)\n expected = Series(ser.values).dt.tz_localize(ser.dtype.tz)\n tm.assert_series_equal(result, expected)\n\n with tm.assert_produces_warning(None):\n # one suggested alternative to the deprecated (changed in 2.0) usage\n middle = Series(ser.values).dt.tz_localize("UTC")\n result = middle.dt.tz_convert(ser.dtype.tz)\n tm.assert_series_equal(result, ser)\n\n with tm.assert_produces_warning(None):\n # the other suggested alternative to the deprecated usage\n result = Series(ser.values.view("int64"), dtype=ser.dtype)\n tm.assert_series_equal(result, ser)\n\n @pytest.mark.parametrize(\n "data_constructor", [list, np.array], ids=["list", "ndarray[object]"]\n )\n def test_constructor_infer_period(self, data_constructor):\n data = [Period("2000", "D"), Period("2001", "D"), None]\n result = Series(data_constructor(data))\n expected = Series(period_array(data))\n tm.assert_series_equal(result, expected)\n assert result.dtype == "Period[D]"\n\n @pytest.mark.xfail(reason="PeriodDtype Series not supported yet")\n def test_construct_from_ints_including_iNaT_scalar_period_dtype(self):\n series = Series([0, 1000, 2000, pd._libs.iNaT], dtype="period[D]")\n\n val = series[3]\n assert isna(val)\n\n series[2] = val\n assert isna(series[2])\n\n def test_constructor_period_incompatible_frequency(self):\n data = [Period("2000", "D"), Period("2001", 
"Y")]
        result = Series(data)
        assert result.dtype == object
        assert result.tolist() == data

    def test_constructor_periodindex(self):
        # GH7932
        # converting a PeriodIndex when put in a Series keeps Period dtype

        pi = period_range("20130101", periods=5, freq="D")
        s = Series(pi)
        assert s.dtype == "Period[D]"
        with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
            expected = Series(pi.astype(object))
        tm.assert_series_equal(s, expected)

    def test_constructor_dict(self):
        d = {"a": 0.0, "b": 1.0, "c": 2.0}

        result = Series(d)
        expected = Series(d, index=sorted(d.keys()))
        tm.assert_series_equal(result, expected)

        # explicit index reindexes the dict; missing key "d" becomes NaN
        result = Series(d, index=["b", "c", "d", "a"])
        expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
        tm.assert_series_equal(result, expected)

        # dict keyed by Periods, reindexed against the full PeriodIndex
        pidx = period_range("2020-01-01", periods=10, freq="D")
        d = {pidx[0]: 0, pidx[1]: 1}
        result = Series(d, index=pidx)
        expected = Series(np.nan, pidx, dtype=np.float64)
        expected.iloc[0] = 0
        expected.iloc[1] = 1
        tm.assert_series_equal(result, expected)

    def test_constructor_dict_list_value_explicit_dtype(self):
        # GH 18625
        d = {"a": [[2], [3], [4]]}
        result = Series(d, index=["a"], dtype="object")
        expected = Series(d, index=["a"])
        tm.assert_series_equal(result, expected)

    def test_constructor_dict_order(self):
        # GH19018
        # initialization ordering: by insertion order
        d = {"b": 1, "a": 0, "c": 2}
        result = Series(d)
        expected = Series([1, 0, 2], index=list("bac"))
        tm.assert_series_equal(result, expected)

    def test_constructor_dict_extension(self, ea_scalar_and_dtype, request):
        ea_scalar, ea_dtype = ea_scalar_and_dtype
        if isinstance(ea_scalar, Timestamp):
            # known failure for Timestamp scalars: dict path casts to nano
            mark = pytest.mark.xfail(
                reason="Construction from dict goes through "
                "maybe_convert_objects which casts to nano"
            )
            request.applymarker(mark)
        d = {"a": ea_scalar}
        result = Series(d, index=["a"])
        expected = Series(ea_scalar, index=["a"], dtype=ea_dtype)

        assert 
result.dtype == ea_dtype\n\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("value", [2, np.nan, None, float("nan")])\n def test_constructor_dict_nan_key(self, value):\n # GH 18480\n d = {1: "a", value: "b", float("nan"): "c", 4: "d"}\n result = Series(d).sort_values()\n expected = Series(["a", "b", "c", "d"], index=[1, value, np.nan, 4])\n tm.assert_series_equal(result, expected)\n\n # MultiIndex:\n d = {(1, 1): "a", (2, np.nan): "b", (3, value): "c"}\n result = Series(d).sort_values()\n expected = Series(\n ["a", "b", "c"], index=Index([(1, 1), (2, np.nan), (3, value)])\n )\n tm.assert_series_equal(result, expected)\n\n def test_constructor_dict_datetime64_index(self):\n # GH 9456\n\n dates_as_str = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"]\n values = [42544017.198965244, 1234565, 40512335.181958228, -1]\n\n def create_data(constructor):\n return dict(zip((constructor(x) for x in dates_as_str), values))\n\n data_datetime64 = create_data(np.datetime64)\n data_datetime = create_data(lambda x: datetime.strptime(x, "%Y-%m-%d"))\n data_Timestamp = create_data(Timestamp)\n\n expected = Series(values, (Timestamp(x) for x in dates_as_str))\n\n result_datetime64 = Series(data_datetime64)\n result_datetime = Series(data_datetime)\n result_Timestamp = Series(data_Timestamp)\n\n tm.assert_series_equal(result_datetime64, expected)\n tm.assert_series_equal(result_datetime, expected)\n tm.assert_series_equal(result_Timestamp, expected)\n\n def test_constructor_dict_tuple_indexer(self):\n # GH 12948\n data = {(1, 1, None): -1.0}\n result = Series(data)\n expected = Series(\n -1.0, index=MultiIndex(levels=[[1], [1], [np.nan]], codes=[[0], [0], [-1]])\n )\n tm.assert_series_equal(result, expected)\n\n def test_constructor_mapping(self, non_dict_mapping_subclass):\n # GH 29788\n ndm = non_dict_mapping_subclass({3: "three"})\n result = Series(ndm)\n expected = Series(["three"], index=[3])\n\n tm.assert_series_equal(result, expected)\n\n def 
test_constructor_list_of_tuples(self):\n data = [(1, 1), (2, 2), (2, 3)]\n s = Series(data)\n assert list(s) == data\n\n def test_constructor_tuple_of_tuples(self):\n data = ((1, 1), (2, 2), (2, 3))\n s = Series(data)\n assert tuple(s) == data\n\n def test_constructor_dict_of_tuples(self):\n data = {(1, 2): 3, (None, 5): 6}\n result = Series(data).sort_values()\n expected = Series([3, 6], index=MultiIndex.from_tuples([(1, 2), (None, 5)]))\n tm.assert_series_equal(result, expected)\n\n # https://github.com/pandas-dev/pandas/issues/22698\n @pytest.mark.filterwarnings("ignore:elementwise comparison:FutureWarning")\n def test_fromDict(self, using_infer_string):\n data = {"a": 0, "b": 1, "c": 2, "d": 3}\n\n series = Series(data)\n tm.assert_is_sorted(series.index)\n\n data = {"a": 0, "b": "1", "c": "2", "d": datetime.now()}\n series = Series(data)\n assert series.dtype == np.object_\n\n data = {"a": 0, "b": "1", "c": "2", "d": "3"}\n series = Series(data)\n assert series.dtype == np.object_ if not using_infer_string else "str"\n\n data = {"a": "0", "b": "1"}\n series = Series(data, dtype=float)\n assert series.dtype == np.float64\n\n def test_fromValue(self, datetime_series, using_infer_string):\n nans = Series(np.nan, index=datetime_series.index, dtype=np.float64)\n assert nans.dtype == np.float64\n assert len(nans) == len(datetime_series)\n\n strings = Series("foo", index=datetime_series.index)\n assert strings.dtype == np.object_ if not using_infer_string else "str"\n assert len(strings) == len(datetime_series)\n\n d = datetime.now()\n dates = Series(d, index=datetime_series.index)\n assert dates.dtype == "M8[us]"\n assert len(dates) == len(datetime_series)\n\n # GH12336\n # Test construction of categorical series from value\n categorical = Series(0, index=datetime_series.index, dtype="category")\n expected = Series(0, index=datetime_series.index).astype("category")\n assert categorical.dtype == "category"\n assert len(categorical) == len(datetime_series)\n 
tm.assert_series_equal(categorical, expected)\n\n def test_constructor_dtype_timedelta64(self):\n # basic\n td = Series([timedelta(days=i) for i in range(3)])\n assert td.dtype == "timedelta64[ns]"\n\n td = Series([timedelta(days=1)])\n assert td.dtype == "timedelta64[ns]"\n\n td = Series([timedelta(days=1), timedelta(days=2), np.timedelta64(1, "s")])\n\n assert td.dtype == "timedelta64[ns]"\n\n # mixed with NaT\n td = Series([timedelta(days=1), NaT], dtype="m8[ns]")\n assert td.dtype == "timedelta64[ns]"\n\n td = Series([timedelta(days=1), np.nan], dtype="m8[ns]")\n assert td.dtype == "timedelta64[ns]"\n\n td = Series([np.timedelta64(300000000), NaT], dtype="m8[ns]")\n assert td.dtype == "timedelta64[ns]"\n\n # improved inference\n # GH5689\n td = Series([np.timedelta64(300000000), NaT])\n assert td.dtype == "timedelta64[ns]"\n\n # because iNaT is int, not coerced to timedelta\n td = Series([np.timedelta64(300000000), iNaT])\n assert td.dtype == "object"\n\n td = Series([np.timedelta64(300000000), np.nan])\n assert td.dtype == "timedelta64[ns]"\n\n td = Series([NaT, np.timedelta64(300000000)])\n assert td.dtype == "timedelta64[ns]"\n\n td = Series([np.timedelta64(1, "s")])\n assert td.dtype == "timedelta64[ns]"\n\n # valid astype\n td.astype("int64")\n\n # invalid casting\n msg = r"Converting from timedelta64\[ns\] to int32 is not supported"\n with pytest.raises(TypeError, match=msg):\n td.astype("int32")\n\n # this is an invalid casting\n msg = "|".join(\n [\n "Could not convert object to NumPy timedelta",\n "Could not convert 'foo' to NumPy timedelta",\n ]\n )\n with pytest.raises(ValueError, match=msg):\n Series([timedelta(days=1), "foo"], dtype="m8[ns]")\n\n # leave as object here\n td = Series([timedelta(days=i) for i in range(3)] + ["foo"])\n assert td.dtype == "object"\n\n # as of 2.0, these no longer infer timedelta64 based on the strings,\n # matching Index behavior\n ser = Series([None, NaT, "1 Day"])\n assert ser.dtype == object\n\n ser = 
Series([np.nan, NaT, "1 Day"])\n assert ser.dtype == object\n\n ser = Series([NaT, None, "1 Day"])\n assert ser.dtype == object\n\n ser = Series([NaT, np.nan, "1 Day"])\n assert ser.dtype == object\n\n # GH 16406\n def test_constructor_mixed_tz(self):\n s = Series([Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")])\n expected = Series(\n [Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")],\n dtype="object",\n )\n tm.assert_series_equal(s, expected)\n\n def test_NaT_scalar(self):\n series = Series([0, 1000, 2000, iNaT], dtype="M8[ns]")\n\n val = series[3]\n assert isna(val)\n\n series[2] = val\n assert isna(series[2])\n\n def test_NaT_cast(self):\n # GH10747\n result = Series([np.nan]).astype("M8[ns]")\n expected = Series([NaT], dtype="M8[ns]")\n tm.assert_series_equal(result, expected)\n\n def test_constructor_name_hashable(self):\n for n in [777, 777.0, "name", datetime(2001, 11, 11), (1,), "\u05D0"]:\n for data in [[1, 2, 3], np.ones(3), {"a": 0, "b": 1}]:\n s = Series(data, name=n)\n assert s.name == n\n\n def test_constructor_name_unhashable(self):\n msg = r"Series\.name must be a hashable type"\n for n in [["name_list"], np.ones(2), {1: 2}]:\n for data in [["name_list"], np.ones(2), {1: 2}]:\n with pytest.raises(TypeError, match=msg):\n Series(data, name=n)\n\n def test_auto_conversion(self):\n series = Series(list(date_range("1/1/2000", periods=10)))\n assert series.dtype == "M8[ns]"\n\n def test_convert_non_ns(self):\n # convert from a numpy array of non-ns timedelta64\n arr = np.array([1, 2, 3], dtype="timedelta64[s]")\n ser = Series(arr)\n assert ser.dtype == arr.dtype\n\n tdi = timedelta_range("00:00:01", periods=3, freq="s").as_unit("s")\n expected = Series(tdi)\n assert expected.dtype == arr.dtype\n tm.assert_series_equal(ser, expected)\n\n # convert from a numpy array of non-ns datetime64\n arr = np.array(\n ["2013-01-01", "2013-01-02", "2013-01-03"], dtype="datetime64[D]"\n )\n ser = Series(arr)\n expected = 
Series(date_range("20130101", periods=3, freq="D"), dtype="M8[s]")\n assert expected.dtype == "M8[s]"\n tm.assert_series_equal(ser, expected)\n\n arr = np.array(\n ["2013-01-01 00:00:01", "2013-01-01 00:00:02", "2013-01-01 00:00:03"],\n dtype="datetime64[s]",\n )\n ser = Series(arr)\n expected = Series(\n date_range("20130101 00:00:01", periods=3, freq="s"), dtype="M8[s]"\n )\n assert expected.dtype == "M8[s]"\n tm.assert_series_equal(ser, expected)\n\n @pytest.mark.parametrize(\n "index",\n [\n date_range("1/1/2000", periods=10),\n timedelta_range("1 day", periods=10),\n period_range("2000-Q1", periods=10, freq="Q"),\n ],\n ids=lambda x: type(x).__name__,\n )\n def test_constructor_cant_cast_datetimelike(self, index):\n # floats are not ok\n # strip Index to convert PeriodIndex -> Period\n # We don't care whether the error message says\n # PeriodIndex or PeriodArray\n msg = f"Cannot cast {type(index).__name__.rstrip('Index')}.*? to "\n\n with pytest.raises(TypeError, match=msg):\n Series(index, dtype=float)\n\n # ints are ok\n # we test with np.int64 to get similar results on\n # windows / 32-bit platforms\n result = Series(index, dtype=np.int64)\n expected = Series(index.astype(np.int64))\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "index",\n [\n date_range("1/1/2000", periods=10),\n timedelta_range("1 day", periods=10),\n period_range("2000-Q1", periods=10, freq="Q"),\n ],\n ids=lambda x: type(x).__name__,\n )\n def test_constructor_cast_object(self, index):\n s = Series(index, dtype=object)\n exp = Series(index).astype(object)\n tm.assert_series_equal(s, exp)\n\n s = Series(Index(index, dtype=object), dtype=object)\n exp = Series(index).astype(object)\n tm.assert_series_equal(s, exp)\n\n s = Series(index.astype(object), dtype=object)\n exp = Series(index).astype(object)\n tm.assert_series_equal(s, exp)\n\n @pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64])\n def test_constructor_generic_timestamp_no_frequency(self, 
dtype, request):
        # see gh-15524, gh-15987
        msg = "dtype has no unit. Please pass in"

        if np.dtype(dtype).name not in ["timedelta64", "datetime64"]:
            # NOTE(review): xfail branch for named-unit dtype aliases; presumably
            # unreachable with the current [np.datetime64, np.timedelta64] params
            mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit")
            request.applymarker(mark)

        with pytest.raises(ValueError, match=msg):
            Series([], dtype=dtype)

    @pytest.mark.parametrize("unit", ["ps", "as", "fs", "Y", "M", "W", "D", "h", "m"])
    @pytest.mark.parametrize("kind", ["m", "M"])
    def test_constructor_generic_timestamp_bad_frequency(self, kind, unit):
        # see gh-15524, gh-15987
        # as of 2.0 we raise on any non-supported unit rather than silently
        # cast to nanos; previously we only raised for frequencies higher
        # than ns
        dtype = f"{kind}8[{unit}]"

        msg = "dtype=.* is not supported. Supported resolutions are"
        with pytest.raises(TypeError, match=msg):
            Series([], dtype=dtype)

        with pytest.raises(TypeError, match=msg):
            # pre-2.0 the DataFrame cast raised but the Series case did not
            DataFrame([[0]], dtype=dtype)

    @pytest.mark.parametrize("dtype", [None, "uint8", "category"])
    def test_constructor_range_dtype(self, dtype):
        # GH 16804: dtype=None falls back to the default int64
        expected = Series([0, 1, 2, 3, 4], dtype=dtype or "int64")
        result = Series(range(5), dtype=dtype)
        tm.assert_series_equal(result, expected)

    def test_constructor_range_overflows(self):
        # GH#30173 range objects that overflow int64
        # values just above int64 max fit in uint64
        rng = range(2**63, 2**63 + 4)
        ser = Series(rng)
        expected = Series(list(rng))
        tm.assert_series_equal(ser, expected)
        assert list(ser) == list(rng)
        assert ser.dtype == np.uint64

        # descending range over the same values is also uint64
        rng2 = range(2**63 + 4, 2**63, -1)
        ser2 = Series(rng2)
        expected2 = Series(list(rng2))
        tm.assert_series_equal(ser2, expected2)
        assert list(ser2) == list(rng2)
        assert ser2.dtype == np.uint64

        # values below int64 min fit in no integer dtype -> object
        rng3 = range(-(2**63), -(2**63) - 4, -1)
        ser3 = Series(rng3)
        expected3 = Series(list(rng3))
        tm.assert_series_equal(ser3, expected3)
        assert list(ser3) == list(rng3)
        assert ser3.dtype == object

        rng4 = 
range(2**73, 2**73 + 4)\n ser4 = Series(rng4)\n expected4 = Series(list(rng4))\n tm.assert_series_equal(ser4, expected4)\n assert list(ser4) == list(rng4)\n assert ser4.dtype == object\n\n def test_constructor_tz_mixed_data(self):\n # GH 13051\n dt_list = [\n Timestamp("2016-05-01 02:03:37"),\n Timestamp("2016-04-30 19:03:37-0700", tz="US/Pacific"),\n ]\n result = Series(dt_list)\n expected = Series(dt_list, dtype=object)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("pydt", [True, False])\n def test_constructor_data_aware_dtype_naive(self, tz_aware_fixture, pydt):\n # GH#25843, GH#41555, GH#33401\n tz = tz_aware_fixture\n ts = Timestamp("2019", tz=tz)\n if pydt:\n ts = ts.to_pydatetime()\n\n msg = (\n "Cannot convert timezone-aware data to timezone-naive dtype. "\n r"Use pd.Series\(values\).dt.tz_localize\(None\) instead."\n )\n with pytest.raises(ValueError, match=msg):\n Series([ts], dtype="datetime64[ns]")\n\n with pytest.raises(ValueError, match=msg):\n Series(np.array([ts], dtype=object), dtype="datetime64[ns]")\n\n with pytest.raises(ValueError, match=msg):\n Series({0: ts}, dtype="datetime64[ns]")\n\n msg = "Cannot unbox tzaware Timestamp to tznaive dtype"\n with pytest.raises(TypeError, match=msg):\n Series(ts, index=[0], dtype="datetime64[ns]")\n\n def test_constructor_datetime64(self):\n rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")\n dates = np.asarray(rng)\n\n series = Series(dates)\n assert np.issubdtype(series.dtype, np.dtype("M8[ns]"))\n\n def test_constructor_datetimelike_scalar_to_string_dtype(\n self, nullable_string_dtype\n ):\n # https://github.com/pandas-dev/pandas/pull/33846\n result = Series("M", index=[1, 2, 3], dtype=nullable_string_dtype)\n expected = Series(["M", "M", "M"], index=[1, 2, 3], dtype=nullable_string_dtype)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "values",\n [\n [np.datetime64("2012-01-01"), np.datetime64("2013-01-01")],\n ["2012-01-01", 
"2013-01-01"],
        ],
    )
    def test_constructor_sparse_datetime64(self, values):
        # https://github.com/pandas-dev/pandas/issues/35762
        dtype = pd.SparseDtype("datetime64[ns]")
        result = Series(values, dtype=dtype)
        arr = pd.arrays.SparseArray(values, dtype=dtype)
        expected = Series(arr)
        tm.assert_series_equal(result, expected)

    def test_construction_from_ordered_collection(self):
        # https://github.com/pandas-dev/pandas/issues/36044
        # dict views preserve insertion order when consumed by the constructor
        result = Series({"a": 1, "b": 2}.keys())
        expected = Series(["a", "b"])
        tm.assert_series_equal(result, expected)

        result = Series({"a": 1, "b": 2}.values())
        expected = Series([1, 2])
        tm.assert_series_equal(result, expected)

    def test_construction_from_large_int_scalar_no_overflow(self):
        # https://github.com/pandas-dev/pandas/issues/36291
        # scalar far beyond int64 range must not overflow
        n = 1_000_000_000_000_000_000_000
        result = Series(n, index=[0])
        expected = Series(n)
        tm.assert_series_equal(result, expected)

    def test_constructor_list_of_periods_infers_period_dtype(self):
        series = Series(list(period_range("2000-01-01", periods=10, freq="D")))
        assert series.dtype == "Period[D]"

        series = Series(
            [Period("2011-01-01", freq="D"), Period("2011-02-01", freq="D")]
        )
        assert series.dtype == "Period[D]"

    def test_constructor_subclass_dict(self, dict_subclass):
        data = dict_subclass((x, 10.0 * x) for x in range(10))
        series = Series(data)
        expected = Series(dict(data.items()))
        tm.assert_series_equal(series, expected)

    def test_constructor_ordereddict(self):
        # GH3283
        data = OrderedDict(
            (f"col{i}", np.random.default_rng(2).random()) for i in range(12)
        )

        series = Series(data)
        expected = Series(list(data.values()), list(data.keys()))
        tm.assert_series_equal(series, expected)

        # Test with subclass
        class A(OrderedDict):
            pass

        series = Series(A(data))
        tm.assert_series_equal(series, expected)

    def test_constructor_dict_multiindex(self):
        d = {("a", "a"): 0.0, ("b", "a"): 1.0, ("b", "c"): 2.0}
        _d = 
sorted(d.items())\n result = Series(d)\n expected = Series(\n [x[1] for x in _d], index=MultiIndex.from_tuples([x[0] for x in _d])\n )\n tm.assert_series_equal(result, expected)\n\n d["z"] = 111.0\n _d.insert(0, ("z", d["z"]))\n result = Series(d)\n expected = Series(\n [x[1] for x in _d], index=Index([x[0] for x in _d], tupleize_cols=False)\n )\n result = result.reindex(index=expected.index)\n tm.assert_series_equal(result, expected)\n\n def test_constructor_dict_multiindex_reindex_flat(self):\n # construction involves reindexing with a MultiIndex corner case\n data = {("i", "i"): 0, ("i", "j"): 1, ("j", "i"): 2, "j": np.nan}\n expected = Series(data)\n\n result = Series(expected[:-1].to_dict(), index=expected.index)\n tm.assert_series_equal(result, expected)\n\n def test_constructor_dict_timedelta_index(self):\n # GH #12169 : Resample category data with timedelta index\n # construct Series from dict as data and TimedeltaIndex as index\n # will result NaN in result Series data\n expected = Series(\n data=["A", "B", "C"], index=pd.to_timedelta([0, 10, 20], unit="s")\n )\n\n result = Series(\n data={\n pd.to_timedelta(0, unit="s"): "A",\n pd.to_timedelta(10, unit="s"): "B",\n pd.to_timedelta(20, unit="s"): "C",\n },\n index=pd.to_timedelta([0, 10, 20], unit="s"),\n )\n tm.assert_series_equal(result, expected)\n\n def test_constructor_infer_index_tz(self):\n values = [188.5, 328.25]\n tzinfo = tzoffset(None, 7200)\n index = [\n datetime(2012, 5, 11, 11, tzinfo=tzinfo),\n datetime(2012, 5, 11, 12, tzinfo=tzinfo),\n ]\n series = Series(data=values, index=index)\n\n assert series.index.tz == tzinfo\n\n # it works! 
GH#2443\n repr(series.index[0])\n\n def test_constructor_with_pandas_dtype(self):\n # going through 2D->1D path\n vals = [(1,), (2,), (3,)]\n ser = Series(vals)\n dtype = ser.array.dtype # NumpyEADtype\n ser2 = Series(vals, dtype=dtype)\n tm.assert_series_equal(ser, ser2)\n\n def test_constructor_int_dtype_missing_values(self):\n # GH#43017\n result = Series(index=[0], dtype="int64")\n expected = Series(np.nan, index=[0], dtype="float64")\n tm.assert_series_equal(result, expected)\n\n def test_constructor_bool_dtype_missing_values(self):\n # GH#43018\n result = Series(index=[0], dtype="bool")\n expected = Series(True, index=[0], dtype="bool")\n tm.assert_series_equal(result, expected)\n\n def test_constructor_int64_dtype(self, any_int_dtype):\n # GH#44923\n result = Series(["0", "1", "2"], dtype=any_int_dtype)\n expected = Series([0, 1, 2], dtype=any_int_dtype)\n tm.assert_series_equal(result, expected)\n\n def test_constructor_raise_on_lossy_conversion_of_strings(self):\n # GH#44923\n if not np_version_gt2:\n raises = pytest.raises(\n ValueError, match="string values cannot be losslessly cast to int8"\n )\n else:\n raises = pytest.raises(\n OverflowError, match="The elements provided in the data"\n )\n with raises:\n Series(["128"], dtype="int8")\n\n def test_constructor_dtype_timedelta_alternative_construct(self):\n # GH#35465\n result = Series([1000000, 200000, 3000000], dtype="timedelta64[ns]")\n expected = Series(pd.to_timedelta([1000000, 200000, 3000000], unit="ns"))\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.xfail(\n reason="Not clear what the correct expected behavior should be with "\n "integers now that we support non-nano. ATM (2022-10-08) we treat ints "\n "as nanoseconds, then cast to the requested dtype. 
xref #48312"\n )\n def test_constructor_dtype_timedelta_ns_s(self):\n # GH#35465\n result = Series([1000000, 200000, 3000000], dtype="timedelta64[ns]")\n expected = Series([1000000, 200000, 3000000], dtype="timedelta64[s]")\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.xfail(\n reason="Not clear what the correct expected behavior should be with "\n "integers now that we support non-nano. ATM (2022-10-08) we treat ints "\n "as nanoseconds, then cast to the requested dtype. xref #48312"\n )\n def test_constructor_dtype_timedelta_ns_s_astype_int64(self):\n # GH#35465\n result = Series([1000000, 200000, 3000000], dtype="timedelta64[ns]").astype(\n "int64"\n )\n expected = Series([1000000, 200000, 3000000], dtype="timedelta64[s]").astype(\n "int64"\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.filterwarnings(\n "ignore:elementwise comparison failed:DeprecationWarning"\n )\n @pytest.mark.parametrize("func", [Series, DataFrame, Index, pd.array])\n def test_constructor_mismatched_null_nullable_dtype(\n self, func, any_numeric_ea_dtype\n ):\n # GH#44514\n msg = "|".join(\n [\n "cannot safely cast non-equivalent object",\n r"int\(\) argument must be a string, a bytes-like object "\n "or a (real )?number",\n r"Cannot cast array data from dtype\('O'\) to dtype\('float64'\) "\n "according to the rule 'safe'",\n "object cannot be converted to a FloatingDtype",\n "'values' contains non-numeric NA",\n ]\n )\n\n for null in tm.NP_NAT_OBJECTS + [NaT]:\n with pytest.raises(TypeError, match=msg):\n func([null, 1.0, 3.0], dtype=any_numeric_ea_dtype)\n\n def test_series_constructor_ea_int_from_bool(self):\n # GH#42137\n result = Series([True, False, True, pd.NA], dtype="Int64")\n expected = Series([1, 0, 1, pd.NA], dtype="Int64")\n tm.assert_series_equal(result, expected)\n\n result = Series([True, False, True], dtype="Int64")\n expected = Series([1, 0, 1], dtype="Int64")\n tm.assert_series_equal(result, expected)\n\n def 
test_series_constructor_ea_int_from_string_bool(self):\n # GH#42137\n with pytest.raises(ValueError, match="invalid literal"):\n Series(["True", "False", "True", pd.NA], dtype="Int64")\n\n @pytest.mark.parametrize("val", [1, 1.0])\n def test_series_constructor_overflow_uint_ea(self, val):\n # GH#38798\n max_val = np.iinfo(np.uint64).max - 1\n result = Series([max_val, val], dtype="UInt64")\n expected = Series(np.array([max_val, 1], dtype="uint64"), dtype="UInt64")\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("val", [1, 1.0])\n def test_series_constructor_overflow_uint_ea_with_na(self, val):\n # GH#38798\n max_val = np.iinfo(np.uint64).max - 1\n result = Series([max_val, val, pd.NA], dtype="UInt64")\n expected = Series(\n IntegerArray(\n np.array([max_val, 1, 0], dtype="uint64"),\n np.array([0, 0, 1], dtype=np.bool_),\n )\n )\n tm.assert_series_equal(result, expected)\n\n def test_series_constructor_overflow_uint_with_nan(self):\n # GH#38798\n max_val = np.iinfo(np.uint64).max - 1\n result = Series([max_val, np.nan], dtype="UInt64")\n expected = Series(\n IntegerArray(\n np.array([max_val, 1], dtype="uint64"),\n np.array([0, 1], dtype=np.bool_),\n )\n )\n tm.assert_series_equal(result, expected)\n\n def test_series_constructor_ea_all_na(self):\n # GH#38798\n result = Series([np.nan, np.nan], dtype="UInt64")\n expected = Series(\n IntegerArray(\n np.array([1, 1], dtype="uint64"),\n np.array([1, 1], dtype=np.bool_),\n )\n )\n tm.assert_series_equal(result, expected)\n\n def test_series_from_index_dtype_equal_does_not_copy(self):\n # GH#52008\n idx = Index([1, 2, 3])\n expected = idx.copy(deep=True)\n ser = Series(idx, dtype="int64")\n ser.iloc[0] = 100\n tm.assert_index_equal(idx, expected)\n\n def test_series_string_inference(self):\n # GH#54430\n with pd.option_context("future.infer_string", True):\n ser = Series(["a", "b"])\n dtype = pd.StringDtype("pyarrow" if HAS_PYARROW else "python", na_value=np.nan)\n expected = Series(["a", "b"], 
dtype=dtype)\n tm.assert_series_equal(ser, expected)\n\n expected = Series(["a", 1], dtype="object")\n with pd.option_context("future.infer_string", True):\n ser = Series(["a", 1])\n tm.assert_series_equal(ser, expected)\n\n @pytest.mark.parametrize("na_value", [None, np.nan, pd.NA])\n def test_series_string_with_na_inference(self, na_value):\n # GH#54430\n with pd.option_context("future.infer_string", True):\n ser = Series(["a", na_value])\n dtype = pd.StringDtype("pyarrow" if HAS_PYARROW else "python", na_value=np.nan)\n expected = Series(["a", None], dtype=dtype)\n tm.assert_series_equal(ser, expected)\n\n def test_series_string_inference_scalar(self):\n # GH#54430\n with pd.option_context("future.infer_string", True):\n ser = Series("a", index=[1])\n dtype = pd.StringDtype("pyarrow" if HAS_PYARROW else "python", na_value=np.nan)\n expected = Series("a", index=[1], dtype=dtype)\n tm.assert_series_equal(ser, expected)\n\n def test_series_string_inference_array_string_dtype(self):\n # GH#54496\n with pd.option_context("future.infer_string", True):\n ser = Series(np.array(["a", "b"]))\n dtype = pd.StringDtype("pyarrow" if HAS_PYARROW else "python", na_value=np.nan)\n expected = Series(["a", "b"], dtype=dtype)\n tm.assert_series_equal(ser, expected)\n\n def test_series_string_inference_storage_definition(self):\n # https://github.com/pandas-dev/pandas/issues/54793\n # but after PDEP-14 (string dtype), it was decided to keep dtype="string"\n # returning the NA string dtype, so expected is changed from\n # "string[pyarrow_numpy]" to "string[python]"\n expected = Series(["a", "b"], dtype="string[python]")\n with pd.option_context("future.infer_string", True):\n result = Series(["a", "b"], dtype="string")\n tm.assert_series_equal(result, expected)\n\n expected = Series(["a", "b"], dtype=pd.StringDtype(na_value=np.nan))\n with pd.option_context("future.infer_string", True):\n result = Series(["a", "b"], dtype="str")\n tm.assert_series_equal(result, expected)\n\n def 
test_series_constructor_infer_string_scalar(self):\n # GH#55537\n with pd.option_context("future.infer_string", True):\n ser = Series("a", index=[1, 2], dtype="string[python]")\n expected = Series(["a", "a"], index=[1, 2], dtype="string[python]")\n tm.assert_series_equal(ser, expected)\n assert ser.dtype.storage == "python"\n\n def test_series_string_inference_na_first(self):\n # GH#55655\n with pd.option_context("future.infer_string", True):\n result = Series([pd.NA, "b"])\n dtype = pd.StringDtype("pyarrow" if HAS_PYARROW else "python", na_value=np.nan)\n expected = Series([None, "b"], dtype=dtype)\n tm.assert_series_equal(result, expected)\n\n def test_inference_on_pandas_objects(self):\n # GH#56012\n ser = Series([Timestamp("2019-12-31")], dtype=object)\n with tm.assert_produces_warning(None):\n # This doesn't do inference\n result = Series(ser)\n assert result.dtype == np.object_\n\n idx = Index([Timestamp("2019-12-31")], dtype=object)\n\n with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):\n result = Series(idx)\n assert result.dtype != np.object_\n\n\nclass TestSeriesConstructorIndexCoercion:\n def test_series_constructor_datetimelike_index_coercion(self):\n idx = date_range("2020-01-01", periods=5)\n ser = Series(\n np.random.default_rng(2).standard_normal(len(idx)), idx.astype(object)\n )\n # as of 2.0, we no longer silently cast the object-dtype index\n # to DatetimeIndex GH#39307, GH#23598\n assert not isinstance(ser.index, DatetimeIndex)\n\n @pytest.mark.parametrize("container", [None, np.array, Series, Index])\n @pytest.mark.parametrize("data", [1.0, range(4)])\n def test_series_constructor_infer_multiindex(self, container, data):\n indexes = [["a", "a", "b", "b"], ["x", "y", "x", "y"]]\n if container is not None:\n indexes = [container(ind) for ind in indexes]\n\n multi = Series(data, index=indexes)\n assert isinstance(multi.index, MultiIndex)\n\n # TODO: make this not cast to object in pandas 3.0\n @pytest.mark.skipif(\n not 
np_version_gt2, reason="StringDType only available in numpy 2 and above"\n )\n @pytest.mark.parametrize(\n "data",\n [\n ["a", "b", "c"],\n ["a", "b", np.nan],\n ],\n )\n def test_np_string_array_object_cast(self, data):\n from numpy.dtypes import StringDType\n\n arr = np.array(data, dtype=StringDType())\n res = Series(arr)\n assert res.dtype == np.object_\n assert (res == data).all()\n\n\nclass TestSeriesConstructorInternals:\n def test_constructor_no_pandas_array(self, using_array_manager):\n ser = Series([1, 2, 3])\n result = Series(ser.array)\n tm.assert_series_equal(ser, result)\n if not using_array_manager:\n assert isinstance(result._mgr.blocks[0], NumpyBlock)\n assert result._mgr.blocks[0].is_numeric\n\n @td.skip_array_manager_invalid_test\n def test_from_array(self):\n result = Series(pd.array(["1h", "2h"], dtype="timedelta64[ns]"))\n assert result._mgr.blocks[0].is_extension is False\n\n result = Series(pd.array(["2015"], dtype="datetime64[ns]"))\n assert result._mgr.blocks[0].is_extension is False\n\n @td.skip_array_manager_invalid_test\n def test_from_list_dtype(self):\n result = Series(["1h", "2h"], dtype="timedelta64[ns]")\n assert result._mgr.blocks[0].is_extension is False\n\n result = Series(["2015"], dtype="datetime64[ns]")\n assert result._mgr.blocks[0].is_extension is False\n\n\ndef test_constructor(rand_series_with_duplicate_datetimeindex):\n dups = rand_series_with_duplicate_datetimeindex\n assert isinstance(dups, Series)\n assert isinstance(dups.index, DatetimeIndex)\n\n\n@pytest.mark.parametrize(\n "input_dict,expected",\n [\n ({0: 0}, np.array([[0]], dtype=np.int64)),\n ({"a": "a"}, np.array([["a"]], dtype=object)),\n ({1: 1}, np.array([[1]], dtype=np.int64)),\n ],\n)\ndef test_numpy_array(input_dict, expected):\n result = np.array([Series(input_dict)])\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_index_ordered_dict_keys():\n # GH 22077\n\n param_index = OrderedDict(\n [\n ((("a", "b"), ("c", "d")), 1),\n ((("a", None), 
("c", "d")), 2),\n ]\n )\n series = Series([1, 2], index=param_index.keys())\n expected = Series(\n [1, 2],\n index=MultiIndex.from_tuples(\n [(("a", "b"), ("c", "d")), (("a", None), ("c", "d"))]\n ),\n )\n tm.assert_series_equal(series, expected)\n\n\n@pytest.mark.parametrize(\n "input_list",\n [\n [1, complex("nan"), 2],\n [1 + 1j, complex("nan"), 2 + 2j],\n ],\n)\ndef test_series_with_complex_nan(input_list):\n # GH#53627\n ser = Series(input_list)\n result = Series(ser.array)\n assert ser.dtype == "complex128"\n tm.assert_series_equal(ser, result)\n | .venv\Lib\site-packages\pandas\tests\series\test_constructors.py | test_constructors.py | Python | 85,825 | 0.75 | 0.106707 | 0.135433 | node-utils | 581 | 2023-11-12T23:33:38.960690 | Apache-2.0 | true | 0c18867d4d17119c8066379e8ec8a3a7 |
"""\nTests for Series cumulative operations.\n\nSee also\n--------\ntests.frame.test_cumulative\n"""\n\nimport re\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\nmethods = {\n "cumsum": np.cumsum,\n "cumprod": np.cumprod,\n "cummin": np.minimum.accumulate,\n "cummax": np.maximum.accumulate,\n}\n\n\nclass TestSeriesCumulativeOps:\n @pytest.mark.parametrize("func", [np.cumsum, np.cumprod])\n def test_datetime_series(self, datetime_series, func):\n tm.assert_numpy_array_equal(\n func(datetime_series).values,\n func(np.array(datetime_series)),\n check_dtype=True,\n )\n\n # with missing values\n ts = datetime_series.copy()\n ts[::2] = np.nan\n\n result = func(ts)[1::2]\n expected = func(np.array(ts.dropna()))\n\n tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)\n\n @pytest.mark.parametrize("method", ["cummin", "cummax"])\n def test_cummin_cummax(self, datetime_series, method):\n ufunc = methods[method]\n\n result = getattr(datetime_series, method)().values\n expected = ufunc(np.array(datetime_series))\n\n tm.assert_numpy_array_equal(result, expected)\n ts = datetime_series.copy()\n ts[::2] = np.nan\n result = getattr(ts, method)()[1::2]\n expected = ufunc(ts.dropna())\n\n result.index = result.index._with_freq(None)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "ts",\n [\n pd.Timedelta(0),\n pd.Timestamp("1999-12-31"),\n pd.Timestamp("1999-12-31").tz_localize("US/Pacific"),\n ],\n )\n @pytest.mark.parametrize(\n "method, skipna, exp_tdi",\n [\n ["cummax", True, ["NaT", "2 days", "NaT", "2 days", "NaT", "3 days"]],\n ["cummin", True, ["NaT", "2 days", "NaT", "1 days", "NaT", "1 days"]],\n [\n "cummax",\n False,\n ["NaT", "NaT", "NaT", "NaT", "NaT", "NaT"],\n ],\n [\n "cummin",\n False,\n ["NaT", "NaT", "NaT", "NaT", "NaT", "NaT"],\n ],\n ],\n )\n def test_cummin_cummax_datetimelike(self, ts, method, skipna, exp_tdi):\n # with ts==pd.Timedelta(0), we are testing td64; with 
naive Timestamp\n # we are testing datetime64[ns]; with Timestamp[US/Pacific]\n # we are testing dt64tz\n tdi = pd.to_timedelta(["NaT", "2 days", "NaT", "1 days", "NaT", "3 days"])\n ser = pd.Series(tdi + ts)\n\n exp_tdi = pd.to_timedelta(exp_tdi)\n expected = pd.Series(exp_tdi + ts)\n result = getattr(ser, method)(skipna=skipna)\n tm.assert_series_equal(expected, result)\n\n @pytest.mark.parametrize(\n "func, exp",\n [\n ("cummin", pd.Period("2012-1-1", freq="D")),\n ("cummax", pd.Period("2012-1-2", freq="D")),\n ],\n )\n def test_cummin_cummax_period(self, func, exp):\n # GH#28385\n ser = pd.Series(\n [pd.Period("2012-1-1", freq="D"), pd.NaT, pd.Period("2012-1-2", freq="D")]\n )\n result = getattr(ser, func)(skipna=False)\n expected = pd.Series([pd.Period("2012-1-1", freq="D"), pd.NaT, pd.NaT])\n tm.assert_series_equal(result, expected)\n\n result = getattr(ser, func)(skipna=True)\n expected = pd.Series([pd.Period("2012-1-1", freq="D"), pd.NaT, exp])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "arg",\n [\n [False, False, False, True, True, False, False],\n [False, False, False, False, False, False, False],\n ],\n )\n @pytest.mark.parametrize(\n "func", [lambda x: x, lambda x: ~x], ids=["identity", "inverse"]\n )\n @pytest.mark.parametrize("method", methods.keys())\n def test_cummethods_bool(self, arg, func, method):\n # GH#6270\n # checking Series method vs the ufunc applied to the values\n\n ser = func(pd.Series(arg))\n ufunc = methods[method]\n\n exp_vals = ufunc(ser.values)\n expected = pd.Series(exp_vals)\n\n result = getattr(ser, method)()\n\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "method, expected",\n [\n ["cumsum", pd.Series([0, 1, np.nan, 1], dtype=object)],\n ["cumprod", pd.Series([False, 0, np.nan, 0])],\n ["cummin", pd.Series([False, False, np.nan, False])],\n ["cummax", pd.Series([False, True, np.nan, True])],\n ],\n )\n def test_cummethods_bool_in_object_dtype(self, method, 
expected):\n ser = pd.Series([False, True, np.nan, False])\n result = getattr(ser, method)()\n tm.assert_series_equal(result, expected)\n\n def test_cumprod_timedelta(self):\n # GH#48111\n ser = pd.Series([pd.Timedelta(days=1), pd.Timedelta(days=3)])\n with pytest.raises(TypeError, match="cumprod not supported for Timedelta"):\n ser.cumprod()\n\n @pytest.mark.parametrize(\n "data, op, skipna, expected_data",\n [\n ([], "cumsum", True, []),\n ([], "cumsum", False, []),\n (["x", "z", "y"], "cumsum", True, ["x", "xz", "xzy"]),\n (["x", "z", "y"], "cumsum", False, ["x", "xz", "xzy"]),\n (["x", pd.NA, "y"], "cumsum", True, ["x", pd.NA, "xy"]),\n (["x", pd.NA, "y"], "cumsum", False, ["x", pd.NA, pd.NA]),\n ([pd.NA, "x", "y"], "cumsum", True, [pd.NA, "x", "xy"]),\n ([pd.NA, "x", "y"], "cumsum", False, [pd.NA, pd.NA, pd.NA]),\n ([pd.NA, pd.NA, pd.NA], "cumsum", True, [pd.NA, pd.NA, pd.NA]),\n ([pd.NA, pd.NA, pd.NA], "cumsum", False, [pd.NA, pd.NA, pd.NA]),\n ([], "cummin", True, []),\n ([], "cummin", False, []),\n (["y", "z", "x"], "cummin", True, ["y", "y", "x"]),\n (["y", "z", "x"], "cummin", False, ["y", "y", "x"]),\n (["y", pd.NA, "x"], "cummin", True, ["y", pd.NA, "x"]),\n (["y", pd.NA, "x"], "cummin", False, ["y", pd.NA, pd.NA]),\n ([pd.NA, "y", "x"], "cummin", True, [pd.NA, "y", "x"]),\n ([pd.NA, "y", "x"], "cummin", False, [pd.NA, pd.NA, pd.NA]),\n ([pd.NA, pd.NA, pd.NA], "cummin", True, [pd.NA, pd.NA, pd.NA]),\n ([pd.NA, pd.NA, pd.NA], "cummin", False, [pd.NA, pd.NA, pd.NA]),\n ([], "cummax", True, []),\n ([], "cummax", False, []),\n (["x", "z", "y"], "cummax", True, ["x", "z", "z"]),\n (["x", "z", "y"], "cummax", False, ["x", "z", "z"]),\n (["x", pd.NA, "y"], "cummax", True, ["x", pd.NA, "y"]),\n (["x", pd.NA, "y"], "cummax", False, ["x", pd.NA, pd.NA]),\n ([pd.NA, "x", "y"], "cummax", True, [pd.NA, "x", "y"]),\n ([pd.NA, "x", "y"], "cummax", False, [pd.NA, pd.NA, pd.NA]),\n ([pd.NA, pd.NA, pd.NA], "cummax", True, [pd.NA, pd.NA, pd.NA]),\n ([pd.NA, pd.NA, pd.NA], 
"cummax", False, [pd.NA, pd.NA, pd.NA]),\n ],\n )\n def test_cum_methods_ea_strings(\n self, string_dtype_no_object, data, op, skipna, expected_data\n ):\n # https://github.com/pandas-dev/pandas/pull/60633 - pyarrow\n # https://github.com/pandas-dev/pandas/pull/60938 - Python\n ser = pd.Series(data, dtype=string_dtype_no_object)\n method = getattr(ser, op)\n expected = pd.Series(expected_data, dtype=string_dtype_no_object)\n result = method(skipna=skipna)\n tm.assert_series_equal(result, expected)\n\n def test_cumprod_pyarrow_strings(self, pyarrow_string_dtype, skipna):\n # https://github.com/pandas-dev/pandas/pull/60633\n ser = pd.Series(list("xyz"), dtype=pyarrow_string_dtype)\n msg = re.escape(f"operation 'cumprod' not supported for dtype '{ser.dtype}'")\n with pytest.raises(TypeError, match=msg):\n ser.cumprod(skipna=skipna)\n | .venv\Lib\site-packages\pandas\tests\series\test_cumulative.py | test_cumulative.py | Python | 7,949 | 0.95 | 0.061321 | 0.059459 | awesome-app | 797 | 2025-02-03T08:00:16.791633 | GPL-3.0 | true | 8a0b8d3a6e902d1f4fc53619be03b2db |
from datetime import (\n datetime,\n timedelta,\n)\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n DataFrame,\n Index,\n Series,\n date_range,\n option_context,\n period_range,\n timedelta_range,\n)\nimport pandas._testing as tm\n\n\nclass TestSeriesRepr:\n def test_multilevel_name_print_0(self):\n # GH#55415 None does not get printed, but 0 does\n # (matching DataFrame and flat index behavior)\n mi = pd.MultiIndex.from_product([range(2, 3), range(3, 4)], names=[0, None])\n ser = Series(1.5, index=mi)\n\n res = repr(ser)\n expected = "0 \n2 3 1.5\ndtype: float64"\n assert res == expected\n\n def test_multilevel_name_print(self, lexsorted_two_level_string_multiindex):\n index = lexsorted_two_level_string_multiindex\n ser = Series(range(len(index)), index=index, name="sth")\n expected = [\n "first second",\n "foo one 0",\n " two 1",\n " three 2",\n "bar one 3",\n " two 4",\n "baz two 5",\n " three 6",\n "qux one 7",\n " two 8",\n " three 9",\n "Name: sth, dtype: int64",\n ]\n expected = "\n".join(expected)\n assert repr(ser) == expected\n\n def test_small_name_printing(self):\n # Test small Series.\n s = Series([0, 1, 2])\n\n s.name = "test"\n assert "Name: test" in repr(s)\n\n s.name = None\n assert "Name:" not in repr(s)\n\n def test_big_name_printing(self):\n # Test big Series (diff code path).\n s = Series(range(1000))\n\n s.name = "test"\n assert "Name: test" in repr(s)\n\n s.name = None\n assert "Name:" not in repr(s)\n\n def test_empty_name_printing(self):\n s = Series(index=date_range("20010101", "20020101"), name="test", dtype=object)\n assert "Name: test" in repr(s)\n\n @pytest.mark.parametrize("args", [(), (0, -1)])\n def test_float_range(self, args):\n str(\n Series(\n np.random.default_rng(2).standard_normal(1000),\n index=np.arange(1000, *args),\n )\n )\n\n def test_empty_object(self):\n # empty\n str(Series(dtype=object))\n\n def test_string(self, string_series):\n str(string_series)\n 
str(string_series.astype(int))\n\n # with NaNs\n string_series[5:7] = np.nan\n str(string_series)\n\n def test_object(self, object_series):\n str(object_series)\n\n def test_datetime(self, datetime_series):\n str(datetime_series)\n # with Nones\n ots = datetime_series.astype("O")\n ots[::2] = None\n repr(ots)\n\n @pytest.mark.parametrize(\n "name",\n [\n "",\n 1,\n 1.2,\n "foo",\n "\u03B1\u03B2\u03B3",\n "loooooooooooooooooooooooooooooooooooooooooooooooooooong",\n ("foo", "bar", "baz"),\n (1, 2),\n ("foo", 1, 2.3),\n ("\u03B1", "\u03B2", "\u03B3"),\n ("\u03B1", "bar"),\n ],\n )\n def test_various_names(self, name, string_series):\n # various names\n string_series.name = name\n repr(string_series)\n\n def test_tuple_name(self):\n biggie = Series(\n np.random.default_rng(2).standard_normal(1000),\n index=np.arange(1000),\n name=("foo", "bar", "baz"),\n )\n repr(biggie)\n\n @pytest.mark.parametrize("arg", [100, 1001])\n def test_tidy_repr_name_0(self, arg):\n # tidy repr\n ser = Series(np.random.default_rng(2).standard_normal(arg), name=0)\n rep_str = repr(ser)\n assert "Name: 0" in rep_str\n\n def test_newline(self, any_string_dtype):\n ser = Series(\n ["a\n\r\tb"],\n name="a\n\r\td",\n index=Index(["a\n\r\tf"], dtype=any_string_dtype),\n dtype=any_string_dtype,\n )\n assert "\t" not in repr(ser)\n assert "\r" not in repr(ser)\n assert "a\n" not in repr(ser)\n\n @pytest.mark.parametrize(\n "name, expected",\n [\n ["foo", "Series([], Name: foo, dtype: int64)"],\n [None, "Series([], dtype: int64)"],\n ],\n )\n def test_empty_int64(self, name, expected):\n # with empty series (#4651)\n s = Series([], dtype=np.int64, name=name)\n assert repr(s) == expected\n\n def test_repr_bool_fails(self, capsys):\n s = Series(\n [\n DataFrame(np.random.default_rng(2).standard_normal((2, 2)))\n for i in range(5)\n ]\n )\n\n # It works (with no Cython exception barf)!\n repr(s)\n\n captured = capsys.readouterr()\n assert captured.err == ""\n\n def 
test_repr_name_iterable_indexable(self):\n s = Series([1, 2, 3], name=np.int64(3))\n\n # it works!\n repr(s)\n\n s.name = ("\u05d0",) * 2\n repr(s)\n\n def test_repr_max_rows(self):\n # GH 6863\n with option_context("display.max_rows", None):\n str(Series(range(1001))) # should not raise exception\n\n def test_unicode_string_with_unicode(self):\n df = Series(["\u05d0"], name="\u05d1")\n str(df)\n\n ser = Series(["\u03c3"] * 10)\n repr(ser)\n\n ser2 = Series(["\u05d0"] * 1000)\n ser2.name = "title1"\n repr(ser2)\n\n def test_str_to_bytes_raises(self):\n # GH 26447\n df = Series(["abc"], name="abc")\n msg = "^'str' object cannot be interpreted as an integer$"\n with pytest.raises(TypeError, match=msg):\n bytes(df)\n\n def test_timeseries_repr_object_dtype(self):\n index = Index(\n [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)], dtype=object\n )\n ts = Series(np.random.default_rng(2).standard_normal(len(index)), index)\n repr(ts)\n\n ts = Series(\n np.arange(20, dtype=np.float64), index=date_range("2020-01-01", periods=20)\n )\n assert repr(ts).splitlines()[-1].startswith("Freq:")\n\n ts2 = ts.iloc[np.random.default_rng(2).integers(0, len(ts) - 1, 400)]\n repr(ts2).splitlines()[-1]\n\n def test_latex_repr(self):\n pytest.importorskip("jinja2") # uses Styler implementation\n result = r"""\begin{tabular}{ll}\n\toprule\n & 0 \\\n\midrule\n0 & $\alpha$ \\\n1 & b \\\n2 & c \\\n\bottomrule\n\end{tabular}\n"""\n with option_context(\n "styler.format.escape", None, "styler.render.repr", "latex"\n ):\n s = Series([r"$\alpha$", "b", "c"])\n assert result == s._repr_latex_()\n\n assert s._repr_latex_() is None\n\n def test_index_repr_in_frame_with_nan(self):\n # see gh-25061\n i = Index([1, np.nan])\n s = Series([1, 2], index=i)\n exp = """1.0 1\nNaN 2\ndtype: int64"""\n\n assert repr(s) == exp\n\n def test_format_pre_1900_dates(self):\n rng = date_range("1/1/1850", "1/1/1950", freq="YE-DEC")\n msg = "DatetimeIndex.format is deprecated"\n with 
tm.assert_produces_warning(FutureWarning, match=msg):\n rng.format()\n ts = Series(1, index=rng)\n repr(ts)\n\n def test_series_repr_nat(self):\n series = Series([0, 1000, 2000, pd.NaT._value], dtype="M8[ns]")\n\n result = repr(series)\n expected = (\n "0 1970-01-01 00:00:00.000000\n"\n "1 1970-01-01 00:00:00.000001\n"\n "2 1970-01-01 00:00:00.000002\n"\n "3 NaT\n"\n "dtype: datetime64[ns]"\n )\n assert result == expected\n\n def test_float_repr(self):\n # GH#35603\n # check float format when cast to object\n ser = Series([1.0]).astype(object)\n expected = "0 1.0\ndtype: object"\n assert repr(ser) == expected\n\n def test_different_null_objects(self):\n # GH#45263\n ser = Series([1, 2, 3, 4], [True, None, np.nan, pd.NaT])\n result = repr(ser)\n expected = "True 1\nNone 2\nNaN 3\nNaT 4\ndtype: int64"\n assert result == expected\n\n\nclass TestCategoricalRepr:\n def test_categorical_repr_unicode(self):\n # see gh-21002\n\n class County:\n name = "San Sebastián"\n state = "PR"\n\n def __repr__(self) -> str:\n return self.name + ", " + self.state\n\n cat = Categorical([County() for _ in range(61)])\n idx = Index(cat)\n ser = idx.to_series()\n\n repr(ser)\n str(ser)\n\n def test_categorical_repr(self, using_infer_string):\n a = Series(Categorical([1, 2, 3, 4]))\n exp = (\n "0 1\n1 2\n2 3\n3 4\n"\n "dtype: category\nCategories (4, int64): [1, 2, 3, 4]"\n )\n\n assert exp == a.__str__()\n\n a = Series(Categorical(["a", "b"] * 25))\n if using_infer_string:\n exp = (\n "0 a\n1 b\n"\n " ..\n"\n "48 a\n49 b\n"\n "Length: 50, dtype: category\nCategories (2, str): [a, b]"\n )\n else:\n exp = (\n "0 a\n1 b\n"\n " ..\n"\n "48 a\n49 b\n"\n "Length: 50, dtype: category\nCategories (2, object): ['a', 'b']"\n )\n with option_context("display.max_rows", 5):\n assert exp == repr(a)\n\n levs = list("abcdefghijklmnopqrstuvwxyz")\n a = Series(Categorical(["a", "b"], categories=levs, ordered=True))\n if using_infer_string:\n exp = (\n "0 a\n1 b\n"\n "dtype: category\n"\n "Categories (26, 
str): [a < b < c < d ... w < x < y < z]"\n )\n else:\n exp = (\n "0 a\n1 b\n"\n "dtype: category\n"\n "Categories (26, object): ['a' < 'b' < 'c' < 'd' ... "\n "'w' < 'x' < 'y' < 'z']"\n )\n assert exp == a.__str__()\n\n def test_categorical_series_repr(self):\n s = Series(Categorical([1, 2, 3]))\n exp = """0 1\n1 2\n2 3\ndtype: category\nCategories (3, int64): [1, 2, 3]"""\n\n assert repr(s) == exp\n\n s = Series(Categorical(np.arange(10)))\n exp = f"""0 0\n1 1\n2 2\n3 3\n4 4\n5 5\n6 6\n7 7\n8 8\n9 9\ndtype: category\nCategories (10, {np.dtype(int)}): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""\n\n assert repr(s) == exp\n\n def test_categorical_series_repr_ordered(self):\n s = Series(Categorical([1, 2, 3], ordered=True))\n exp = """0 1\n1 2\n2 3\ndtype: category\nCategories (3, int64): [1 < 2 < 3]"""\n\n assert repr(s) == exp\n\n s = Series(Categorical(np.arange(10), ordered=True))\n exp = f"""0 0\n1 1\n2 2\n3 3\n4 4\n5 5\n6 6\n7 7\n8 8\n9 9\ndtype: category\nCategories (10, {np.dtype(int)}): [0 < 1 < 2 < 3 ... 
6 < 7 < 8 < 9]"""\n\n assert repr(s) == exp\n\n def test_categorical_series_repr_datetime(self):\n idx = date_range("2011-01-01 09:00", freq="h", periods=5)\n s = Series(Categorical(idx))\n exp = """0 2011-01-01 09:00:00\n1 2011-01-01 10:00:00\n2 2011-01-01 11:00:00\n3 2011-01-01 12:00:00\n4 2011-01-01 13:00:00\ndtype: category\nCategories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,\n 2011-01-01 12:00:00, 2011-01-01 13:00:00]""" # noqa: E501\n\n assert repr(s) == exp\n\n idx = date_range("2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern")\n s = Series(Categorical(idx))\n exp = """0 2011-01-01 09:00:00-05:00\n1 2011-01-01 10:00:00-05:00\n2 2011-01-01 11:00:00-05:00\n3 2011-01-01 12:00:00-05:00\n4 2011-01-01 13:00:00-05:00\ndtype: category\nCategories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n 2011-01-01 13:00:00-05:00]""" # noqa: E501\n\n assert repr(s) == exp\n\n def test_categorical_series_repr_datetime_ordered(self):\n idx = date_range("2011-01-01 09:00", freq="h", periods=5)\n s = Series(Categorical(idx, ordered=True))\n exp = """0 2011-01-01 09:00:00\n1 2011-01-01 10:00:00\n2 2011-01-01 11:00:00\n3 2011-01-01 12:00:00\n4 2011-01-01 13:00:00\ndtype: category\nCategories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <\n 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa: E501\n\n assert repr(s) == exp\n\n idx = date_range("2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern")\n s = Series(Categorical(idx, ordered=True))\n exp = """0 2011-01-01 09:00:00-05:00\n1 2011-01-01 10:00:00-05:00\n2 2011-01-01 11:00:00-05:00\n3 2011-01-01 12:00:00-05:00\n4 2011-01-01 13:00:00-05:00\ndtype: category\nCategories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <\n 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <\n 2011-01-01 
13:00:00-05:00]""" # noqa: E501\n\n assert repr(s) == exp\n\n def test_categorical_series_repr_period(self):\n idx = period_range("2011-01-01 09:00", freq="h", periods=5)\n s = Series(Categorical(idx))\n exp = """0 2011-01-01 09:00\n1 2011-01-01 10:00\n2 2011-01-01 11:00\n3 2011-01-01 12:00\n4 2011-01-01 13:00\ndtype: category\nCategories (5, period[h]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,\n 2011-01-01 13:00]""" # noqa: E501\n\n assert repr(s) == exp\n\n idx = period_range("2011-01", freq="M", periods=5)\n s = Series(Categorical(idx))\n exp = """0 2011-01\n1 2011-02\n2 2011-03\n3 2011-04\n4 2011-05\ndtype: category\nCategories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""\n\n assert repr(s) == exp\n\n def test_categorical_series_repr_period_ordered(self):\n idx = period_range("2011-01-01 09:00", freq="h", periods=5)\n s = Series(Categorical(idx, ordered=True))\n exp = """0 2011-01-01 09:00\n1 2011-01-01 10:00\n2 2011-01-01 11:00\n3 2011-01-01 12:00\n4 2011-01-01 13:00\ndtype: category\nCategories (5, period[h]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <\n 2011-01-01 13:00]""" # noqa: E501\n\n assert repr(s) == exp\n\n idx = period_range("2011-01", freq="M", periods=5)\n s = Series(Categorical(idx, ordered=True))\n exp = """0 2011-01\n1 2011-02\n2 2011-03\n3 2011-04\n4 2011-05\ndtype: category\nCategories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""\n\n assert repr(s) == exp\n\n def test_categorical_series_repr_timedelta(self):\n idx = timedelta_range("1 days", periods=5)\n s = Series(Categorical(idx))\n exp = """0 1 days\n1 2 days\n2 3 days\n3 4 days\n4 5 days\ndtype: category\nCategories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""\n\n assert repr(s) == exp\n\n idx = timedelta_range("1 hours", periods=10)\n s = Series(Categorical(idx))\n exp = """0 0 days 01:00:00\n1 1 days 01:00:00\n2 2 days 01:00:00\n3 3 days 01:00:00\n4 4 days 
01:00:00\n5 5 days 01:00:00\n6 6 days 01:00:00\n7 7 days 01:00:00\n8 8 days 01:00:00\n9 9 days 01:00:00\ndtype: category\nCategories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,\n 3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,\n 8 days 01:00:00, 9 days 01:00:00]""" # noqa: E501\n\n assert repr(s) == exp\n\n def test_categorical_series_repr_timedelta_ordered(self):\n idx = timedelta_range("1 days", periods=5)\n s = Series(Categorical(idx, ordered=True))\n exp = """0 1 days\n1 2 days\n2 3 days\n3 4 days\n4 5 days\ndtype: category\nCategories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""\n\n assert repr(s) == exp\n\n idx = timedelta_range("1 hours", periods=10)\n s = Series(Categorical(idx, ordered=True))\n exp = """0 0 days 01:00:00\n1 1 days 01:00:00\n2 2 days 01:00:00\n3 3 days 01:00:00\n4 4 days 01:00:00\n5 5 days 01:00:00\n6 6 days 01:00:00\n7 7 days 01:00:00\n8 8 days 01:00:00\n9 9 days 01:00:00\ndtype: category\nCategories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <\n 3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <\n 8 days 01:00:00 < 9 days 01:00:00]""" # noqa: E501\n\n assert repr(s) == exp\n | .venv\Lib\site-packages\pandas\tests\series\test_formats.py | test_formats.py | Python | 17,078 | 0.95 | 0.079723 | 0.038934 | vue-tools | 338 | 2024-10-18T05:03:54.990509 | GPL-3.0 | true | 94038b0e42e0ddf0b373f1fd6299e49f |
class TestIteration:\n def test_keys(self, datetime_series):\n assert datetime_series.keys() is datetime_series.index\n\n def test_iter_datetimes(self, datetime_series):\n for i, val in enumerate(datetime_series):\n # pylint: disable-next=unnecessary-list-index-lookup\n assert val == datetime_series.iloc[i]\n\n def test_iter_strings(self, string_series):\n for i, val in enumerate(string_series):\n # pylint: disable-next=unnecessary-list-index-lookup\n assert val == string_series.iloc[i]\n\n def test_iteritems_datetimes(self, datetime_series):\n for idx, val in datetime_series.items():\n assert val == datetime_series[idx]\n\n def test_iteritems_strings(self, string_series):\n for idx, val in string_series.items():\n assert val == string_series[idx]\n\n # assert is lazy (generators don't define reverse, lists do)\n assert not hasattr(string_series.items(), "reverse")\n\n def test_items_datetimes(self, datetime_series):\n for idx, val in datetime_series.items():\n assert val == datetime_series[idx]\n\n def test_items_strings(self, string_series):\n for idx, val in string_series.items():\n assert val == string_series[idx]\n\n # assert is lazy (generators don't define reverse, lists do)\n assert not hasattr(string_series.items(), "reverse")\n | .venv\Lib\site-packages\pandas\tests\series\test_iteration.py | test_iteration.py | Python | 1,408 | 0.95 | 0.4 | 0.148148 | awesome-app | 79 | 2024-06-25T22:38:13.395213 | GPL-3.0 | true | 04de0ff0ffa5eadd71b8f0a9c9444b17 |
from datetime import datetime\nimport operator\n\nimport numpy as np\nimport pytest\n\nfrom pandas._config import using_string_dtype\n\nfrom pandas import (\n ArrowDtype,\n DataFrame,\n Index,\n Series,\n StringDtype,\n bdate_range,\n)\nimport pandas._testing as tm\nfrom pandas.core import ops\n\n\nclass TestSeriesLogicalOps:\n @pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning")\n @pytest.mark.parametrize("bool_op", [operator.and_, operator.or_, operator.xor])\n def test_bool_operators_with_nas(self, bool_op):\n # boolean &, |, ^ should work with object arrays and propagate NAs\n ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)\n ser[::2] = np.nan\n\n mask = ser.isna()\n filled = ser.fillna(ser[0])\n\n result = bool_op(ser < ser[9], ser > ser[3])\n\n expected = bool_op(filled < filled[9], filled > filled[3])\n expected[mask] = False\n tm.assert_series_equal(result, expected)\n\n def test_logical_operators_bool_dtype_with_empty(self):\n # GH#9016: support bitwise op for integer types\n index = list("bca")\n\n s_tft = Series([True, False, True], index=index)\n s_fff = Series([False, False, False], index=index)\n s_empty = Series([], dtype=object)\n\n res = s_tft & s_empty\n expected = s_fff.sort_index()\n tm.assert_series_equal(res, expected)\n\n res = s_tft | s_empty\n expected = s_tft.sort_index()\n tm.assert_series_equal(res, expected)\n\n def test_logical_operators_int_dtype_with_int_dtype(self):\n # GH#9016: support bitwise op for integer types\n\n s_0123 = Series(range(4), dtype="int64")\n s_3333 = Series([3] * 4)\n s_4444 = Series([4] * 4)\n\n res = s_0123 & s_3333\n expected = Series(range(4), dtype="int64")\n tm.assert_series_equal(res, expected)\n\n res = s_0123 | s_4444\n expected = Series(range(4, 8), dtype="int64")\n tm.assert_series_equal(res, expected)\n\n s_1111 = Series([1] * 4, dtype="int8")\n res = s_0123 & s_1111\n expected = Series([0, 1, 0, 1], dtype="int64")\n tm.assert_series_equal(res, 
expected)\n\n res = s_0123.astype(np.int16) | s_1111.astype(np.int32)\n expected = Series([1, 1, 3, 3], dtype="int32")\n tm.assert_series_equal(res, expected)\n\n def test_logical_operators_int_dtype_with_int_scalar(self):\n # GH#9016: support bitwise op for integer types\n s_0123 = Series(range(4), dtype="int64")\n\n res = s_0123 & 0\n expected = Series([0] * 4)\n tm.assert_series_equal(res, expected)\n\n res = s_0123 & 1\n expected = Series([0, 1, 0, 1])\n tm.assert_series_equal(res, expected)\n\n def test_logical_operators_int_dtype_with_float(self):\n # GH#9016: support bitwise op for integer types\n s_0123 = Series(range(4), dtype="int64")\n\n warn_msg = (\n r"Logical ops \(and, or, xor\) between Pandas objects and "\n "dtype-less sequences"\n )\n\n msg = "Cannot perform.+with a dtyped.+array and scalar of type"\n with pytest.raises(TypeError, match=msg):\n s_0123 & np.nan\n with pytest.raises(TypeError, match=msg):\n s_0123 & 3.14\n msg = "unsupported operand type.+for &:"\n with pytest.raises(TypeError, match=msg):\n with tm.assert_produces_warning(FutureWarning, match=warn_msg):\n s_0123 & [0.1, 4, 3.14, 2]\n with pytest.raises(TypeError, match=msg):\n s_0123 & np.array([0.1, 4, 3.14, 2])\n with pytest.raises(TypeError, match=msg):\n s_0123 & Series([0.1, 4, -3.14, 2])\n\n def test_logical_operators_int_dtype_with_str(self):\n s_1111 = Series([1] * 4, dtype="int8")\n\n warn_msg = (\n r"Logical ops \(and, or, xor\) between Pandas objects and "\n "dtype-less sequences"\n )\n\n msg = "Cannot perform 'and_' with a dtyped.+array and scalar of type"\n with pytest.raises(TypeError, match=msg):\n s_1111 & "a"\n with pytest.raises(TypeError, match="unsupported operand.+for &"):\n with tm.assert_produces_warning(FutureWarning, match=warn_msg):\n s_1111 & ["a", "b", "c", "d"]\n\n def test_logical_operators_int_dtype_with_bool(self):\n # GH#9016: support bitwise op for integer types\n s_0123 = Series(range(4), dtype="int64")\n\n expected = Series([False] * 4)\n\n 
result = s_0123 & False\n tm.assert_series_equal(result, expected)\n\n warn_msg = (\n r"Logical ops \(and, or, xor\) between Pandas objects and "\n "dtype-less sequences"\n )\n with tm.assert_produces_warning(FutureWarning, match=warn_msg):\n result = s_0123 & [False]\n tm.assert_series_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning, match=warn_msg):\n result = s_0123 & (False,)\n tm.assert_series_equal(result, expected)\n\n result = s_0123 ^ False\n expected = Series([False, True, True, True])\n tm.assert_series_equal(result, expected)\n\n def test_logical_operators_int_dtype_with_object(self):\n # GH#9016: support bitwise op for integer types\n s_0123 = Series(range(4), dtype="int64")\n\n result = s_0123 & Series([False, np.nan, False, False])\n expected = Series([False] * 4)\n tm.assert_series_equal(result, expected)\n\n s_abNd = Series(["a", "b", np.nan, "d"])\n with pytest.raises(\n TypeError, match="unsupported.* 'int' and 'str'|'rand_' not supported"\n ):\n s_0123 & s_abNd\n\n def test_logical_operators_bool_dtype_with_int(self):\n index = list("bca")\n\n s_tft = Series([True, False, True], index=index)\n s_fff = Series([False, False, False], index=index)\n\n res = s_tft & 0\n expected = s_fff\n tm.assert_series_equal(res, expected)\n\n res = s_tft & 1\n expected = s_tft\n tm.assert_series_equal(res, expected)\n\n def test_logical_ops_bool_dtype_with_ndarray(self):\n # make sure we operate on ndarray the same as Series\n left = Series([True, True, True, False, True])\n right = [True, False, None, True, np.nan]\n\n msg = (\n r"Logical ops \(and, or, xor\) between Pandas objects and "\n "dtype-less sequences"\n )\n\n expected = Series([True, False, False, False, False])\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = left & right\n tm.assert_series_equal(result, expected)\n result = left & np.array(right)\n tm.assert_series_equal(result, expected)\n result = left & Index(right)\n tm.assert_series_equal(result, 
expected)\n result = left & Series(right)\n tm.assert_series_equal(result, expected)\n\n expected = Series([True, True, True, True, True])\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = left | right\n tm.assert_series_equal(result, expected)\n result = left | np.array(right)\n tm.assert_series_equal(result, expected)\n result = left | Index(right)\n tm.assert_series_equal(result, expected)\n result = left | Series(right)\n tm.assert_series_equal(result, expected)\n\n expected = Series([False, True, True, True, True])\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = left ^ right\n tm.assert_series_equal(result, expected)\n result = left ^ np.array(right)\n tm.assert_series_equal(result, expected)\n result = left ^ Index(right)\n tm.assert_series_equal(result, expected)\n result = left ^ Series(right)\n tm.assert_series_equal(result, expected)\n\n def test_logical_operators_int_dtype_with_bool_dtype_and_reindex(self):\n # GH#9016: support bitwise op for integer types\n\n index = list("bca")\n\n s_tft = Series([True, False, True], index=index)\n s_tft = Series([True, False, True], index=index)\n s_tff = Series([True, False, False], index=index)\n\n s_0123 = Series(range(4), dtype="int64")\n\n # s_0123 will be all false now because of reindexing like s_tft\n expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])\n with tm.assert_produces_warning(FutureWarning):\n result = s_tft & s_0123\n tm.assert_series_equal(result, expected)\n\n # GH 52538: Deprecate casting to object type when reindex is needed;\n # matches DataFrame behavior\n expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])\n with tm.assert_produces_warning(FutureWarning):\n result = s_0123 & s_tft\n tm.assert_series_equal(result, expected)\n\n s_a0b1c0 = Series([1], list("b"))\n\n with tm.assert_produces_warning(FutureWarning):\n res = s_tft & s_a0b1c0\n expected = s_tff.reindex(list("abc"))\n tm.assert_series_equal(res, expected)\n\n 
with tm.assert_produces_warning(FutureWarning):\n res = s_tft | s_a0b1c0\n expected = s_tft.reindex(list("abc"))\n tm.assert_series_equal(res, expected)\n\n def test_scalar_na_logical_ops_corners(self):\n s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])\n\n msg = "Cannot perform.+with a dtyped.+array and scalar of type"\n with pytest.raises(TypeError, match=msg):\n s & datetime(2005, 1, 1)\n\n s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])\n s[::2] = np.nan\n\n expected = Series(True, index=s.index)\n expected[::2] = False\n\n msg = (\n r"Logical ops \(and, or, xor\) between Pandas objects and "\n "dtype-less sequences"\n )\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = s & list(s)\n tm.assert_series_equal(result, expected)\n\n def test_scalar_na_logical_ops_corners_aligns(self):\n s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])\n s[::2] = np.nan\n d = DataFrame({"A": s})\n\n expected = DataFrame(False, index=range(9), columns=["A"] + list(range(9)))\n\n result = s & d\n tm.assert_frame_equal(result, expected)\n\n result = d & s\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("op", [operator.and_, operator.or_, operator.xor])\n def test_logical_ops_with_index(self, op):\n # GH#22092, GH#19792\n ser = Series([True, True, False, False])\n idx1 = Index([True, False, True, False])\n idx2 = Index([1, 0, 1, 0])\n\n expected = Series([op(ser[n], idx1[n]) for n in range(len(ser))])\n\n result = op(ser, idx1)\n tm.assert_series_equal(result, expected)\n\n expected = Series([op(ser[n], idx2[n]) for n in range(len(ser))], dtype=bool)\n\n result = op(ser, idx2)\n tm.assert_series_equal(result, expected)\n\n def test_reversed_xor_with_index_returns_series(self):\n # GH#22092, GH#19792 pre-2.0 these were aliased to setops\n ser = Series([True, True, False, False])\n idx1 = Index([True, False, True, False], dtype=bool)\n idx2 = Index([1, 0, 1, 0])\n\n expected = Series([False, True, True, False])\n result = idx1 ^ 
ser\n tm.assert_series_equal(result, expected)\n\n result = idx2 ^ ser\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "op",\n [\n ops.rand_,\n ops.ror_,\n ],\n )\n def test_reversed_logical_op_with_index_returns_series(self, op):\n # GH#22092, GH#19792\n ser = Series([True, True, False, False])\n idx1 = Index([True, False, True, False])\n idx2 = Index([1, 0, 1, 0])\n\n expected = Series(op(idx1.values, ser.values))\n result = op(ser, idx1)\n tm.assert_series_equal(result, expected)\n\n expected = op(ser, Series(idx2))\n result = op(ser, idx2)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "op, expected",\n [\n (ops.rand_, Series([False, False])),\n (ops.ror_, Series([True, True])),\n (ops.rxor, Series([True, True])),\n ],\n )\n def test_reverse_ops_with_index(self, op, expected):\n # https://github.com/pandas-dev/pandas/pull/23628\n # multi-set Index ops are buggy, so let's avoid duplicates...\n # GH#49503\n ser = Series([True, False])\n idx = Index([False, True])\n\n result = op(ser, idx)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.xfail(using_string_dtype(), reason="TODO(infer_string)")\n def test_logical_ops_label_based(self, using_infer_string):\n # GH#4947\n # logical ops should be label based\n\n a = Series([True, False, True], list("bca"))\n b = Series([False, True, False], list("abc"))\n\n expected = Series([False, True, False], list("abc"))\n result = a & b\n tm.assert_series_equal(result, expected)\n\n expected = Series([True, True, False], list("abc"))\n result = a | b\n tm.assert_series_equal(result, expected)\n\n expected = Series([True, False, False], list("abc"))\n result = a ^ b\n tm.assert_series_equal(result, expected)\n\n # rhs is bigger\n a = Series([True, False, True], list("bca"))\n b = Series([False, True, False, True], list("abcd"))\n\n expected = Series([False, True, False, False], list("abcd"))\n result = a & b\n tm.assert_series_equal(result, expected)\n\n expected = 
Series([True, True, False, False], list("abcd"))\n result = a | b\n tm.assert_series_equal(result, expected)\n\n # filling\n\n # vs empty\n empty = Series([], dtype=object)\n\n result = a & empty.copy()\n expected = Series([False, False, False], list("abc"))\n tm.assert_series_equal(result, expected)\n\n result = a | empty.copy()\n expected = Series([True, True, False], list("abc"))\n tm.assert_series_equal(result, expected)\n\n # vs non-matching\n with tm.assert_produces_warning(FutureWarning):\n result = a & Series([1], ["z"])\n expected = Series([False, False, False, False], list("abcz"))\n tm.assert_series_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning):\n result = a | Series([1], ["z"])\n expected = Series([True, True, False, False], list("abcz"))\n tm.assert_series_equal(result, expected)\n\n # identity\n # we would like s[s|e] == s to hold for any e, whether empty or not\n with tm.assert_produces_warning(FutureWarning):\n for e in [\n empty.copy(),\n Series([1], ["z"]),\n Series(np.nan, b.index),\n Series(np.nan, a.index),\n ]:\n result = a[a | e]\n tm.assert_series_equal(result, a[a])\n\n for e in [Series(["z"])]:\n if using_infer_string:\n # TODO(infer_string) should this behave differently?\n # -> https://github.com/pandas-dev/pandas/issues/60234\n with pytest.raises(\n TypeError, match="not supported for dtype|unsupported operand type"\n ):\n result = a[a | e]\n else:\n result = a[a | e]\n tm.assert_series_equal(result, a[a])\n\n # vs scalars\n index = list("bca")\n t = Series([True, False, True])\n\n for v in [True, 1, 2]:\n result = Series([True, False, True], index=index) | v\n expected = Series([True, True, True], index=index)\n tm.assert_series_equal(result, expected)\n\n msg = "Cannot perform.+with a dtyped.+array and scalar of type"\n for v in [np.nan, "foo"]:\n with pytest.raises(TypeError, match=msg):\n t | v\n\n for v in [False, 0]:\n result = Series([True, False, True], index=index) | v\n expected = Series([True, 
False, True], index=index)\n tm.assert_series_equal(result, expected)\n\n for v in [True, 1]:\n result = Series([True, False, True], index=index) & v\n expected = Series([True, False, True], index=index)\n tm.assert_series_equal(result, expected)\n\n for v in [False, 0]:\n result = Series([True, False, True], index=index) & v\n expected = Series([False, False, False], index=index)\n tm.assert_series_equal(result, expected)\n msg = "Cannot perform.+with a dtyped.+array and scalar of type"\n for v in [np.nan]:\n with pytest.raises(TypeError, match=msg):\n t & v\n\n def test_logical_ops_df_compat(self):\n # GH#1134\n s1 = Series([True, False, True], index=list("ABC"), name="x")\n s2 = Series([True, True, False], index=list("ABD"), name="x")\n\n exp = Series([True, False, False, False], index=list("ABCD"), name="x")\n tm.assert_series_equal(s1 & s2, exp)\n tm.assert_series_equal(s2 & s1, exp)\n\n # True | np.nan => True\n exp_or1 = Series([True, True, True, False], index=list("ABCD"), name="x")\n tm.assert_series_equal(s1 | s2, exp_or1)\n # np.nan | True => np.nan, filled with False\n exp_or = Series([True, True, False, False], index=list("ABCD"), name="x")\n tm.assert_series_equal(s2 | s1, exp_or)\n\n # DataFrame doesn't fill nan with False\n tm.assert_frame_equal(s1.to_frame() & s2.to_frame(), exp.to_frame())\n tm.assert_frame_equal(s2.to_frame() & s1.to_frame(), exp.to_frame())\n\n exp = DataFrame({"x": [True, True, np.nan, np.nan]}, index=list("ABCD"))\n tm.assert_frame_equal(s1.to_frame() | s2.to_frame(), exp_or1.to_frame())\n tm.assert_frame_equal(s2.to_frame() | s1.to_frame(), exp_or.to_frame())\n\n # different length\n s3 = Series([True, False, True], index=list("ABC"), name="x")\n s4 = Series([True, True, True, True], index=list("ABCD"), name="x")\n\n exp = Series([True, False, True, False], index=list("ABCD"), name="x")\n tm.assert_series_equal(s3 & s4, exp)\n tm.assert_series_equal(s4 & s3, exp)\n\n # np.nan | True => np.nan, filled with False\n exp_or1 = 
Series([True, True, True, False], index=list("ABCD"), name="x")\n tm.assert_series_equal(s3 | s4, exp_or1)\n # True | np.nan => True\n exp_or = Series([True, True, True, True], index=list("ABCD"), name="x")\n tm.assert_series_equal(s4 | s3, exp_or)\n\n tm.assert_frame_equal(s3.to_frame() & s4.to_frame(), exp.to_frame())\n tm.assert_frame_equal(s4.to_frame() & s3.to_frame(), exp.to_frame())\n\n tm.assert_frame_equal(s3.to_frame() | s4.to_frame(), exp_or1.to_frame())\n tm.assert_frame_equal(s4.to_frame() | s3.to_frame(), exp_or.to_frame())\n\n @pytest.mark.xfail(reason="Will pass once #52839 deprecation is enforced")\n def test_int_dtype_different_index_not_bool(self):\n # GH 52500\n ser1 = Series([1, 2, 3], index=[10, 11, 23], name="a")\n ser2 = Series([10, 20, 30], index=[11, 10, 23], name="a")\n result = np.bitwise_xor(ser1, ser2)\n expected = Series([21, 8, 29], index=[10, 11, 23], name="a")\n tm.assert_series_equal(result, expected)\n\n result = ser1 ^ ser2\n tm.assert_series_equal(result, expected)\n\n # TODO: this belongs in comparison tests\n def test_pyarrow_numpy_string_invalid(self):\n # GH#56008\n pa = pytest.importorskip("pyarrow")\n ser = Series([False, True])\n ser2 = Series(["a", "b"], dtype=StringDtype(na_value=np.nan))\n result = ser == ser2\n expected_eq = Series(False, index=ser.index)\n tm.assert_series_equal(result, expected_eq)\n\n result = ser != ser2\n expected_ne = Series(True, index=ser.index)\n tm.assert_series_equal(result, expected_ne)\n\n with pytest.raises(TypeError, match="Invalid comparison"):\n ser > ser2\n\n # GH#59505\n ser3 = ser2.astype("string[pyarrow]")\n result3_eq = ser3 == ser\n tm.assert_series_equal(result3_eq, expected_eq.astype("bool[pyarrow]"))\n result3_ne = ser3 != ser\n tm.assert_series_equal(result3_ne, expected_ne.astype("bool[pyarrow]"))\n\n with pytest.raises(TypeError, match="Invalid comparison"):\n ser > ser3\n\n ser4 = ser2.astype(ArrowDtype(pa.string()))\n result4_eq = ser4 == ser\n 
tm.assert_series_equal(result4_eq, expected_eq.astype("bool[pyarrow]"))\n result4_ne = ser4 != ser\n tm.assert_series_equal(result4_ne, expected_ne.astype("bool[pyarrow]"))\n\n with pytest.raises(TypeError, match="Invalid comparison"):\n ser > ser4\n | .venv\Lib\site-packages\pandas\tests\series\test_logical_ops.py | test_logical_ops.py | Python | 20,938 | 0.95 | 0.077601 | 0.088889 | vue-tools | 338 | 2024-12-04T23:35:34.869546 | BSD-3-Clause | true | a20336d2daaef92146af0f7aa8c41082 |
from datetime import timedelta\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs import iNaT\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n Index,\n NaT,\n Series,\n isna,\n)\nimport pandas._testing as tm\n\n\nclass TestSeriesMissingData:\n def test_categorical_nan_handling(self):\n # NaNs are represented as -1 in labels\n s = Series(Categorical(["a", "b", np.nan, "a"]))\n tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))\n tm.assert_numpy_array_equal(\n s.values.codes, np.array([0, 1, -1, 0], dtype=np.int8)\n )\n\n def test_isna_for_inf(self):\n s = Series(["a", np.inf, np.nan, pd.NA, 1.0])\n msg = "use_inf_as_na option is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n with pd.option_context("mode.use_inf_as_na", True):\n r = s.isna()\n dr = s.dropna()\n e = Series([False, True, True, True, False])\n de = Series(["a", 1.0], index=[0, 4])\n tm.assert_series_equal(r, e)\n tm.assert_series_equal(dr, de)\n\n def test_timedelta64_nan(self):\n td = Series([timedelta(days=i) for i in range(10)])\n\n # nan ops on timedeltas\n td1 = td.copy()\n td1[0] = np.nan\n assert isna(td1[0])\n assert td1[0]._value == iNaT\n td1[0] = td[0]\n assert not isna(td1[0])\n\n # GH#16674 iNaT is treated as an integer when given by the user\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n td1[1] = iNaT\n assert not isna(td1[1])\n assert td1.dtype == np.object_\n assert td1[1] == iNaT\n td1[1] = td[1]\n assert not isna(td1[1])\n\n td1[2] = NaT\n assert isna(td1[2])\n assert td1[2]._value == iNaT\n td1[2] = td[2]\n assert not isna(td1[2])\n\n # boolean setting\n # GH#2899 boolean setting\n td3 = np.timedelta64(timedelta(days=3))\n td7 = np.timedelta64(timedelta(days=7))\n td[(td > td3) & (td < td7)] = np.nan\n assert isna(td).sum() == 3\n\n @pytest.mark.xfail(\n reason="Chained inequality raises when trying to define 'selector'"\n )\n def test_logical_range_select(self, datetime_series):\n # NumPy 
limitation =(\n # https://github.com/pandas-dev/pandas/commit/9030dc021f07c76809848925cb34828f6c8484f3\n\n selector = -0.5 <= datetime_series <= 0.5\n expected = (datetime_series >= -0.5) & (datetime_series <= 0.5)\n tm.assert_series_equal(selector, expected)\n\n def test_valid(self, datetime_series):\n ts = datetime_series.copy()\n ts.index = ts.index._with_freq(None)\n ts[::2] = np.nan\n\n result = ts.dropna()\n assert len(result) == ts.count()\n tm.assert_series_equal(result, ts[1::2])\n tm.assert_series_equal(result, ts[pd.notna(ts)])\n\n\ndef test_hasnans_uncached_for_series():\n # GH#19700\n # set float64 dtype to avoid upcast when setting nan\n idx = Index([0, 1], dtype="float64")\n assert idx.hasnans is False\n assert "hasnans" in idx._cache\n ser = idx.to_series()\n assert ser.hasnans is False\n assert not hasattr(ser, "_cache")\n ser.iloc[-1] = np.nan\n assert ser.hasnans is True\n | .venv\Lib\site-packages\pandas\tests\series\test_missing.py | test_missing.py | Python | 3,277 | 0.95 | 0.07619 | 0.102273 | awesome-app | 866 | 2024-09-17T04:14:57.979497 | GPL-3.0 | true | 2bd9f53310c9c4cb266228ea9a81581e |
"""\nTests for np.foo applied to Series, not necessarily ufuncs.\n"""\n\nimport numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nfrom pandas import Series\nimport pandas._testing as tm\n\n\nclass TestPtp:\n def test_ptp(self):\n # GH#21614\n N = 1000\n arr = np.random.default_rng(2).standard_normal(N)\n ser = Series(arr)\n assert np.ptp(ser) == np.ptp(arr)\n\n\ndef test_numpy_unique(datetime_series):\n # it works!\n np.unique(datetime_series)\n\n\n@pytest.mark.parametrize("index", [["a", "b", "c", "d", "e"], None])\ndef test_numpy_argwhere(index):\n # GH#35331\n\n s = Series(range(5), index=index, dtype=np.int64)\n\n result = np.argwhere(s > 2).astype(np.int64)\n expected = np.array([[3], [4]], dtype=np.int64)\n\n tm.assert_numpy_array_equal(result, expected)\n\n\n@td.skip_if_no("pyarrow")\ndef test_log_arrow_backed_missing_value():\n # GH#56285\n ser = Series([1, 2, None], dtype="float64[pyarrow]")\n result = np.log(ser)\n expected = np.log(Series([1, 2, None], dtype="float64"))\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\test_npfuncs.py | test_npfuncs.py | Python | 1,093 | 0.95 | 0.130435 | 0.125 | awesome-app | 175 | 2024-02-29T14:04:32.099977 | MIT | true | 04dd7529cdbd4bf3101cbf3416269427 |
import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import Series\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize("operation, expected", [("min", "a"), ("max", "b")])\ndef test_reductions_series_strings(operation, expected):\n # GH#31746\n ser = Series(["a", "b"], dtype="string")\n res_operation_serie = getattr(ser, operation)()\n assert res_operation_serie == expected\n\n\n@pytest.mark.parametrize("as_period", [True, False])\ndef test_mode_extension_dtype(as_period):\n # GH#41927 preserve dt64tz dtype\n ser = Series([pd.Timestamp(1979, 4, n) for n in range(1, 5)])\n\n if as_period:\n ser = ser.dt.to_period("D")\n else:\n ser = ser.dt.tz_localize("US/Central")\n\n res = ser.mode()\n assert res.dtype == ser.dtype\n tm.assert_series_equal(res, ser)\n\n\ndef test_mode_nullable_dtype(any_numeric_ea_dtype):\n # GH#55340\n ser = Series([1, 3, 2, pd.NA, 3, 2, pd.NA], dtype=any_numeric_ea_dtype)\n result = ser.mode(dropna=False)\n expected = Series([2, 3, pd.NA], dtype=any_numeric_ea_dtype)\n tm.assert_series_equal(result, expected)\n\n result = ser.mode(dropna=True)\n expected = Series([2, 3], dtype=any_numeric_ea_dtype)\n tm.assert_series_equal(result, expected)\n\n ser[-1] = pd.NA\n\n result = ser.mode(dropna=True)\n expected = Series([2, 3], dtype=any_numeric_ea_dtype)\n tm.assert_series_equal(result, expected)\n\n result = ser.mode(dropna=False)\n expected = Series([pd.NA], dtype=any_numeric_ea_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_mode_infer_string():\n # GH#56183\n pytest.importorskip("pyarrow")\n ser = Series(["a", "b"], dtype=object)\n with pd.option_context("future.infer_string", True):\n result = ser.mode()\n expected = Series(["a", "b"], dtype=object)\n tm.assert_series_equal(result, expected)\n\n\ndef test_reductions_td64_with_nat():\n # GH#8617\n ser = Series([0, pd.NaT], dtype="m8[ns]")\n exp = ser[0]\n assert ser.median() == exp\n assert ser.min() == exp\n assert ser.max() == 
exp\n\n\n@pytest.mark.parametrize("skipna", [True, False])\ndef test_td64_sum_empty(skipna):\n # GH#37151\n ser = Series([], dtype="timedelta64[ns]")\n\n result = ser.sum(skipna=skipna)\n assert isinstance(result, pd.Timedelta)\n assert result == pd.Timedelta(0)\n\n\ndef test_td64_summation_overflow():\n # GH#9442\n ser = Series(pd.date_range("20130101", periods=100000, freq="h"))\n ser[0] += pd.Timedelta("1s 1ms")\n\n # mean\n result = (ser - ser.min()).mean()\n expected = pd.Timedelta((pd.TimedeltaIndex(ser - ser.min()).asi8 / len(ser)).sum())\n\n # the computation is converted to float so\n # might be some loss of precision\n assert np.allclose(result._value / 1000, expected._value / 1000)\n\n # sum\n msg = "overflow in timedelta operation"\n with pytest.raises(ValueError, match=msg):\n (ser - ser.min()).sum()\n\n s1 = ser[0:10000]\n with pytest.raises(ValueError, match=msg):\n (s1 - s1.min()).sum()\n s2 = ser[0:1000]\n (s2 - s2.min()).sum()\n\n\ndef test_prod_numpy16_bug():\n ser = Series([1.0, 1.0, 1.0], index=range(3))\n result = ser.prod()\n\n assert not isinstance(result, Series)\n\n\n@pytest.mark.parametrize("func", [np.any, np.all])\n@pytest.mark.parametrize("kwargs", [{"keepdims": True}, {"out": object()}])\ndef test_validate_any_all_out_keepdims_raises(kwargs, func):\n ser = Series([1, 2])\n param = next(iter(kwargs))\n name = func.__name__\n\n msg = (\n f"the '{param}' parameter is not "\n "supported in the pandas "\n rf"implementation of {name}\(\)"\n )\n with pytest.raises(ValueError, match=msg):\n func(ser, **kwargs)\n\n\ndef test_validate_sum_initial():\n ser = Series([1, 2])\n msg = (\n r"the 'initial' parameter is not "\n r"supported in the pandas "\n r"implementation of sum\(\)"\n )\n with pytest.raises(ValueError, match=msg):\n np.sum(ser, initial=10)\n\n\ndef test_validate_median_initial():\n ser = Series([1, 2])\n msg = (\n r"the 'overwrite_input' parameter is not "\n r"supported in the pandas "\n r"implementation of median\(\)"\n )\n with 
pytest.raises(ValueError, match=msg):\n # It seems like np.median doesn't dispatch, so we use the\n # method instead of the ufunc.\n ser.median(overwrite_input=True)\n\n\ndef test_validate_stat_keepdims():\n ser = Series([1, 2])\n msg = (\n r"the 'keepdims' parameter is not "\n r"supported in the pandas "\n r"implementation of sum\(\)"\n )\n with pytest.raises(ValueError, match=msg):\n np.sum(ser, keepdims=True)\n\n\ndef test_mean_with_convertible_string_raises(using_array_manager, using_infer_string):\n # GH#44008\n ser = Series(["1", "2"])\n assert ser.sum() == "12"\n\n msg = "Could not convert string '12' to numeric|does not support|Cannot perform"\n with pytest.raises(TypeError, match=msg):\n ser.mean()\n\n df = ser.to_frame()\n if not using_array_manager:\n msg = r"Could not convert \['12'\] to numeric|does not support|Cannot perform"\n with pytest.raises(TypeError, match=msg):\n df.mean()\n\n\ndef test_mean_dont_convert_j_to_complex(using_array_manager):\n # GH#36703\n df = pd.DataFrame([{"db": "J", "numeric": 123}])\n if using_array_manager:\n msg = "Could not convert string 'J' to numeric"\n else:\n msg = r"Could not convert \['J'\] to numeric|does not support|Cannot perform"\n with pytest.raises(TypeError, match=msg):\n df.mean()\n\n with pytest.raises(TypeError, match=msg):\n df.agg("mean")\n\n msg = "Could not convert string 'J' to numeric|does not support|Cannot perform"\n with pytest.raises(TypeError, match=msg):\n df["db"].mean()\n msg = "Could not convert string 'J' to numeric|ufunc 'divide'|Cannot perform"\n with pytest.raises(TypeError, match=msg):\n np.mean(df["db"].astype("string").array)\n\n\ndef test_median_with_convertible_string_raises(using_array_manager):\n # GH#34671 this _could_ return a string "2", but definitely not float 2.0\n msg = r"Cannot convert \['1' '2' '3'\] to numeric|does not support|Cannot perform"\n ser = Series(["1", "2", "3"])\n with pytest.raises(TypeError, match=msg):\n ser.median()\n\n if not using_array_manager:\n msg 
= (\n r"Cannot convert \[\['1' '2' '3'\]\] to numeric|does not support"\n "|Cannot perform"\n )\n df = ser.to_frame()\n with pytest.raises(TypeError, match=msg):\n df.median()\n | .venv\Lib\site-packages\pandas\tests\series\test_reductions.py | test_reductions.py | Python | 6,518 | 0.95 | 0.092166 | 0.095238 | vue-tools | 137 | 2024-12-17T12:53:48.424869 | MIT | true | 40a09b7e040ec99629426ade9c12041d |
import numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning"\n)\n\n\nclass TestSeriesSubclassing:\n @pytest.mark.parametrize(\n "idx_method, indexer, exp_data, exp_idx",\n [\n ["loc", ["a", "b"], [1, 2], "ab"],\n ["iloc", [2, 3], [3, 4], "cd"],\n ],\n )\n def test_indexing_sliced(self, idx_method, indexer, exp_data, exp_idx):\n s = tm.SubclassedSeries([1, 2, 3, 4], index=list("abcd"))\n res = getattr(s, idx_method)[indexer]\n exp = tm.SubclassedSeries(exp_data, index=list(exp_idx))\n tm.assert_series_equal(res, exp)\n\n def test_to_frame(self):\n s = tm.SubclassedSeries([1, 2, 3, 4], index=list("abcd"), name="xxx")\n res = s.to_frame()\n exp = tm.SubclassedDataFrame({"xxx": [1, 2, 3, 4]}, index=list("abcd"))\n tm.assert_frame_equal(res, exp)\n\n def test_subclass_unstack(self):\n # GH 15564\n s = tm.SubclassedSeries([1, 2, 3, 4], index=[list("aabb"), list("xyxy")])\n\n res = s.unstack()\n exp = tm.SubclassedDataFrame({"x": [1, 3], "y": [2, 4]}, index=["a", "b"])\n\n tm.assert_frame_equal(res, exp)\n\n def test_subclass_empty_repr(self):\n sub_series = tm.SubclassedSeries()\n assert "SubclassedSeries" in repr(sub_series)\n\n def test_asof(self):\n N = 3\n rng = pd.date_range("1/1/1990", periods=N, freq="53s")\n s = tm.SubclassedSeries({"A": [np.nan, np.nan, np.nan]}, index=rng)\n\n result = s.asof(rng[-2:])\n assert isinstance(result, tm.SubclassedSeries)\n\n def test_explode(self):\n s = tm.SubclassedSeries([[1, 2, 3], "foo", [], [3, 4]])\n result = s.explode()\n assert isinstance(result, tm.SubclassedSeries)\n\n def test_equals(self):\n # https://github.com/pandas-dev/pandas/pull/34402\n # allow subclass in both directions\n s1 = pd.Series([1, 2, 3])\n s2 = tm.SubclassedSeries([1, 2, 3])\n assert s1.equals(s2)\n assert s2.equals(s1)\n\n\nclass SubclassedSeries(pd.Series):\n @property\n def _constructor(self):\n def 
_new(*args, **kwargs):\n # some constructor logic that accesses the Series' name\n if self.name == "test":\n return pd.Series(*args, **kwargs)\n return SubclassedSeries(*args, **kwargs)\n\n return _new\n\n\ndef test_constructor_from_dict():\n # https://github.com/pandas-dev/pandas/issues/52445\n result = SubclassedSeries({"a": 1, "b": 2, "c": 3})\n assert isinstance(result, SubclassedSeries)\n | .venv\Lib\site-packages\pandas\tests\series\test_subclass.py | test_subclass.py | Python | 2,667 | 0.95 | 0.158537 | 0.078125 | react-lib | 134 | 2024-06-15T23:44:53.723541 | BSD-3-Clause | true | 8617cd554a1913d6c94129530f423fad |
from collections import deque
import re
import string

import numpy as np
import pytest

import pandas.util._test_decorators as td

import pandas as pd
import pandas._testing as tm
from pandas.arrays import SparseArray


@pytest.fixture(params=[np.add, np.logaddexp])
def ufunc(request):
    # Binary ufuncs under test: np.add has a Series dunder equivalent,
    # np.logaddexp does not.
    return request.param


@pytest.fixture(
    params=[pytest.param(True, marks=pytest.mark.fails_arm_wheels), False],
    ids=["sparse", "dense"],
)
def sparse(request):
    # Whether to wrap the test arrays in SparseArray before building Series.
    return request.param


@pytest.fixture
def arrays_for_binary_ufunc():
    """
    A pair of random, length-100 integer-dtype arrays, that are mostly 0.

    NOTE(review): both draws use an identically-seeded generator
    (``default_rng(2)``), so a1 and a2 start out equal and differ only
    through the zero-masking below — presumably intentional; confirm.
    """
    a1 = np.random.default_rng(2).integers(0, 10, 100, dtype="int64")
    a2 = np.random.default_rng(2).integers(0, 10, 100, dtype="int64")
    a1[::3] = 0
    a2[::4] = 0
    return a1, a2


@pytest.mark.parametrize("ufunc", [np.positive, np.floor, np.exp])
def test_unary_ufunc(ufunc, sparse):
    # Test that ufunc(pd.Series) == pd.Series(ufunc)
    arr = np.random.default_rng(2).integers(0, 10, 10, dtype="int64")
    arr[::2] = 0
    if sparse:
        arr = SparseArray(arr, dtype=pd.SparseDtype("int64", 0))

    index = list(string.ascii_letters[:10])
    name = "name"
    series = pd.Series(arr, index=index, name=name)

    result = ufunc(series)
    # index and name must survive the ufunc
    expected = pd.Series(ufunc(arr), index=index, name=name)
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("flip", [True, False], ids=["flipped", "straight"])
def test_binary_ufunc_with_array(flip, sparse, ufunc, arrays_for_binary_ufunc):
    # Test that ufunc(pd.Series(a), array) == pd.Series(ufunc(a, b))
    a1, a2 = arrays_for_binary_ufunc
    if sparse:
        a1 = SparseArray(a1, dtype=pd.SparseDtype("int64", 0))
        a2 = SparseArray(a2, dtype=pd.SparseDtype("int64", 0))

    name = "name"  # op(pd.Series, array) preserves the name.
    series = pd.Series(a1, name=name)
    other = a2

    array_args = (a1, a2)
    series_args = (series, other)  # ufunc(series, array)

    if flip:
        # reversed() returns a one-shot iterator; each is consumed exactly
        # once by the *-unpacking below.
        array_args = reversed(array_args)
        series_args = reversed(series_args)  # ufunc(array, series)

    expected = pd.Series(ufunc(*array_args), name=name)
    result = ufunc(*series_args)
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("flip", [True, False], ids=["flipped", "straight"])
def test_binary_ufunc_with_index(flip, sparse, ufunc, arrays_for_binary_ufunc):
    # Test that
    # * func(pd.Series(a), pd.Series(b)) == pd.Series(ufunc(a, b))
    # * ufunc(Index, pd.Series) dispatches to pd.Series (returns a pd.Series)
    a1, a2 = arrays_for_binary_ufunc
    if sparse:
        a1 = SparseArray(a1, dtype=pd.SparseDtype("int64", 0))
        a2 = SparseArray(a2, dtype=pd.SparseDtype("int64", 0))

    name = "name"  # op(pd.Series, array) preserves the name.
    series = pd.Series(a1, name=name)

    other = pd.Index(a2, name=name).astype("int64")

    array_args = (a1, a2)
    series_args = (series, other)  # ufunc(series, array)

    if flip:
        array_args = reversed(array_args)
        series_args = reversed(series_args)  # ufunc(array, series)

    expected = pd.Series(ufunc(*array_args), name=name)
    result = ufunc(*series_args)
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("shuffle", [True, False], ids=["unaligned", "aligned"])
@pytest.mark.parametrize("flip", [True, False], ids=["flipped", "straight"])
def test_binary_ufunc_with_series(
    flip, shuffle, sparse, ufunc, arrays_for_binary_ufunc
):
    # Test that
    # * func(pd.Series(a), pd.Series(b)) == pd.Series(ufunc(a, b))
    # with alignment between the indices
    a1, a2 = arrays_for_binary_ufunc
    if sparse:
        a1 = SparseArray(a1, dtype=pd.SparseDtype("int64", 0))
        a2 = SparseArray(a2, dtype=pd.SparseDtype("int64", 0))

    name = "name"  # op(pd.Series, array) preserves the name.
    series = pd.Series(a1, name=name)
    other = pd.Series(a2, name=name)

    idx = np.random.default_rng(2).permutation(len(a1))

    if shuffle:
        other = other.take(idx)
        # the expected index depends on which operand comes first in align()
        if flip:
            index = other.align(series)[0].index
        else:
            index = series.align(other)[0].index
    else:
        index = series.index

    array_args = (a1, a2)
    series_args = (series, other)  # ufunc(series, array)

    if flip:
        array_args = tuple(reversed(array_args))
        series_args = tuple(reversed(series_args))  # ufunc(array, series)

    expected = pd.Series(ufunc(*array_args), index=index, name=name)
    result = ufunc(*series_args)
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("flip", [True, False])
def test_binary_ufunc_scalar(ufunc, sparse, flip, arrays_for_binary_ufunc):
    # Test that
    # * ufunc(pd.Series, scalar) == pd.Series(ufunc(array, scalar))
    # * ufunc(pd.Series, scalar) == ufunc(scalar, pd.Series)
    arr, _ = arrays_for_binary_ufunc
    if sparse:
        arr = SparseArray(arr)
    other = 2
    series = pd.Series(arr, name="name")

    series_args = (series, other)
    array_args = (arr, other)

    if flip:
        series_args = tuple(reversed(series_args))
        array_args = tuple(reversed(array_args))

    expected = pd.Series(ufunc(*array_args), name="name")
    result = ufunc(*series_args)

    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("ufunc", [np.divmod])  # TODO: np.modf, np.frexp
@pytest.mark.parametrize("shuffle", [True, False])
@pytest.mark.filterwarnings("ignore:divide by zero:RuntimeWarning")
def test_multiple_output_binary_ufuncs(ufunc, sparse, shuffle, arrays_for_binary_ufunc):
    # Test that
    # the same conditions from binary_ufunc_scalar apply to
    # ufuncs with multiple outputs.

    a1, a2 = arrays_for_binary_ufunc
    # work around https://github.com/pandas-dev/pandas/issues/26987
    # (avoid zeros so divmod has no division-by-zero entries)
    a1[a1 == 0] = 1
    a2[a2 == 0] = 1

    if sparse:
        a1 = SparseArray(a1, dtype=pd.SparseDtype("int64", 0))
        a2 = SparseArray(a2, dtype=pd.SparseDtype("int64", 0))

    s1 = pd.Series(a1)
    s2 = pd.Series(a2)

    if shuffle:
        # ensure we align before applying the ufunc
        s2 = s2.sample(frac=1)

    expected = ufunc(a1, a2)
    assert isinstance(expected, tuple)

    result = ufunc(s1, s2)
    assert isinstance(result, tuple)
    tm.assert_series_equal(result[0], pd.Series(expected[0]))
    tm.assert_series_equal(result[1], pd.Series(expected[1]))


def test_multiple_output_ufunc(sparse, arrays_for_binary_ufunc):
    # Test that the same conditions from unary input apply to multi-output
    # ufuncs
    arr, _ = arrays_for_binary_ufunc

    if sparse:
        arr = SparseArray(arr)

    series = pd.Series(arr, name="name")
    result = np.modf(series)
    expected = np.modf(arr)

    assert isinstance(result, tuple)
    assert isinstance(expected, tuple)

    # both outputs keep the Series name
    tm.assert_series_equal(result[0], pd.Series(expected[0], name="name"))
    tm.assert_series_equal(result[1], pd.Series(expected[1], name="name"))


def test_binary_ufunc_drops_series_name(ufunc, sparse, arrays_for_binary_ufunc):
    # Drop the names when they differ.
    a1, a2 = arrays_for_binary_ufunc
    s1 = pd.Series(a1, name="a")
    s2 = pd.Series(a2, name="b")

    result = ufunc(s1, s2)
    assert result.name is None


def test_object_series_ok():
    # object-dtype Series defer elementwise to the objects' __add__
    class Dummy:
        def __init__(self, value) -> None:
            self.value = value

        def __add__(self, other):
            return self.value + other.value

    arr = np.array([Dummy(0), Dummy(1)])
    ser = pd.Series(arr)
    tm.assert_series_equal(np.add(ser, ser), pd.Series(np.add(ser, arr)))
    tm.assert_series_equal(np.add(ser, Dummy(1)), pd.Series(np.add(ser, Dummy(1))))


@pytest.fixture(
    params=[
        pd.array([1, 3, 2], dtype=np.int64),
        pd.array([1, 3, 2], dtype="Int64"),
        pd.array([1, 3, 2], dtype="Float32"),
        pd.array([1, 10, 2], dtype="Sparse[int]"),
        pd.to_datetime(["2000", "2010", "2001"]),
        pd.to_datetime(["2000", "2010", "2001"]).tz_localize("CET"),
        pd.to_datetime(["2000", "2010", "2001"]).to_period(freq="D"),
        pd.to_timedelta(["1 Day", "3 Days", "2 Days"]),
        pd.IntervalIndex([pd.Interval(0, 1), pd.Interval(2, 3), pd.Interval(1, 2)]),
    ],
    ids=lambda x: str(x.dtype),
)
def values_for_np_reduce(request):
    # min/max tests below rely on the layout of these values: the maximum
    # sits at position 1 and the minimum at position 0 in every param.
    return request.param


class TestNumpyReductions:
    # TODO: cases with NAs, axis kwarg for DataFrame

    def test_multiply(self, values_for_np_reduce, box_with_array, request):
        box = box_with_array
        values = values_for_np_reduce

        with tm.assert_produces_warning(None):
            obj = box(values)

        if isinstance(values, pd.core.arrays.SparseArray):
            mark = pytest.mark.xfail(reason="SparseArray has no 'prod'")
            request.applymarker(mark)

        if values.dtype.kind in "iuf":
            result = np.multiply.reduce(obj)
            if box is pd.DataFrame:
                expected = obj.prod(numeric_only=False)
                tm.assert_series_equal(result, expected)
            elif box is pd.Index:
                # Index has no 'prod'
                expected = obj._values.prod()
                assert result == expected
            else:
                expected = obj.prod()
                assert result == expected
        else:
            # non-numeric dtypes must refuse the reduction
            msg = "|".join(
                [
                    "does not support reduction",
                    "unsupported operand type",
                    "ufunc 'multiply' cannot use operands",
                ]
            )
            with pytest.raises(TypeError, match=msg):
                np.multiply.reduce(obj)

    def test_add(self, values_for_np_reduce, box_with_array):
        box = box_with_array
        values = values_for_np_reduce

        with tm.assert_produces_warning(None):
            obj = box(values)

        # "m" (timedelta) additionally supports sum, unlike prod above
        if values.dtype.kind in "miuf":
            result = np.add.reduce(obj)
            if box is pd.DataFrame:
                expected = obj.sum(numeric_only=False)
                tm.assert_series_equal(result, expected)
            elif box is pd.Index:
                # Index has no 'sum'
                expected = obj._values.sum()
                assert result == expected
            else:
                expected = obj.sum()
                assert result == expected
        else:
            msg = "|".join(
                [
                    "does not support reduction",
                    "unsupported operand type",
                    "ufunc 'add' cannot use operands",
                ]
            )
            with pytest.raises(TypeError, match=msg):
                np.add.reduce(obj)

    def test_max(self, values_for_np_reduce, box_with_array):
        box = box_with_array
        values = values_for_np_reduce

        same_type = True
        if box is pd.Index and values.dtype.kind in ["i", "f"]:
            # ATM Index casts to object, so we get python ints/floats
            same_type = False

        with tm.assert_produces_warning(None):
            obj = box(values)

        result = np.maximum.reduce(obj)
        if box is pd.DataFrame:
            # TODO: cases with axis kwarg
            expected = obj.max(numeric_only=False)
            tm.assert_series_equal(result, expected)
        else:
            # fixture guarantees the max is at position 1
            expected = values[1]
            assert result == expected
            if same_type:
                # check we have e.g. Timestamp instead of dt64
                assert type(result) == type(expected)

    def test_min(self, values_for_np_reduce, box_with_array):
        box = box_with_array
        values = values_for_np_reduce

        same_type = True
        if box is pd.Index and values.dtype.kind in ["i", "f"]:
            # ATM Index casts to object, so we get python ints/floats
            same_type = False

        with tm.assert_produces_warning(None):
            obj = box(values)

        result = np.minimum.reduce(obj)
        if box is pd.DataFrame:
            expected = obj.min(numeric_only=False)
            tm.assert_series_equal(result, expected)
        else:
            # fixture guarantees the min is at position 0
            expected = values[0]
            assert result == expected
            if same_type:
                # check we have e.g. Timestamp instead of dt64
                assert type(result) == type(expected)


@pytest.mark.parametrize("type_", [list, deque, tuple])
def test_binary_ufunc_other_types(type_):
    a = pd.Series([1, 2, 3], name="name")
    b = type_([3, 4, 5])

    result = np.add(a, b)
    expected = pd.Series(np.add(a.to_numpy(), b), name="name")
    tm.assert_series_equal(result, expected)


def test_object_dtype_ok():
    # elementwise object addition via the object's own __add__
    class Thing:
        def __init__(self, value) -> None:
            self.value = value

        def __add__(self, other):
            other = getattr(other, "value", other)
            return type(self)(self.value + other)

        def __eq__(self, other) -> bool:
            return type(other) is Thing and self.value == other.value

        def __repr__(self) -> str:
            return f"Thing({self.value})"

    s = pd.Series([Thing(1), Thing(2)])
    result = np.add(s, Thing(1))
    expected = pd.Series([Thing(2), Thing(3)])
    tm.assert_series_equal(result, expected)


def test_outer():
    # https://github.com/pandas-dev/pandas/issues/27186
    # ufunc.outer on a Series is explicitly unsupported
    ser = pd.Series([1, 2, 3])
    obj = np.array([1, 2, 3])

    with pytest.raises(NotImplementedError, match=""):
        np.subtract.outer(ser, obj)


def test_np_matmul():
    # GH26650
    df1 = pd.DataFrame(data=[[-1, 1, 10]])
    df2 = pd.DataFrame(data=[-1, 1, 10])
    expected = pd.DataFrame(data=[102])

    result = np.matmul(df1, df2)
    tm.assert_frame_equal(expected, result)


def test_array_ufuncs_for_many_arguments():
    # GH39853
    def add3(x, y, z):
        return x + y + z

    ufunc = np.frompyfunc(add3, 3, 1)
    ser = pd.Series([1, 2])

    result = ufunc(ser, ser, 1)
    expected = pd.Series([3, 5], dtype=object)
    tm.assert_series_equal(result, expected)

    df = pd.DataFrame([[1, 2]])

    msg = (
        "Cannot apply ufunc <ufunc 'add3 (vectorized)'> "
        "to mixed DataFrame and Series inputs."
    )
    with pytest.raises(NotImplementedError, match=re.escape(msg)):
        ufunc(ser, ser, df)


# TODO(CoW) see https://github.com/pandas-dev/pandas/pull/51082
@td.skip_copy_on_write_not_yet_implemented
def test_np_fix():
    # np.fix is not a ufunc but is composed of several ufunc calls under the hood
    # with `out` and `where` keywords
    ser = pd.Series([-1.5, -0.5, 0.5, 1.5])
    result = np.fix(ser)
    expected = pd.Series([-1.0, -0.0, 0.0, 1.0])
    tm.assert_series_equal(result, expected)
import pytest\n\nfrom pandas import Series\nimport pandas._testing as tm\n\n\nclass TestSeriesUnaryOps:\n # __neg__, __pos__, __invert__\n\n def test_neg(self):\n ser = Series(range(5), dtype="float64", name="series")\n tm.assert_series_equal(-ser, -1 * ser)\n\n def test_invert(self):\n ser = Series(range(5), dtype="float64", name="series")\n tm.assert_series_equal(-(ser < 0), ~(ser < 0))\n\n @pytest.mark.parametrize(\n "source, neg_target, abs_target",\n [\n ([1, 2, 3], [-1, -2, -3], [1, 2, 3]),\n ([1, 2, None], [-1, -2, None], [1, 2, None]),\n ],\n )\n def test_all_numeric_unary_operators(\n self, any_numeric_ea_dtype, source, neg_target, abs_target\n ):\n # GH38794\n dtype = any_numeric_ea_dtype\n ser = Series(source, dtype=dtype)\n neg_result, pos_result, abs_result = -ser, +ser, abs(ser)\n if dtype.startswith("U"):\n neg_target = -Series(source, dtype=dtype)\n else:\n neg_target = Series(neg_target, dtype=dtype)\n\n abs_target = Series(abs_target, dtype=dtype)\n\n tm.assert_series_equal(neg_result, neg_target)\n tm.assert_series_equal(pos_result, ser)\n tm.assert_series_equal(abs_result, abs_target)\n\n @pytest.mark.parametrize("op", ["__neg__", "__abs__"])\n def test_unary_float_op_mask(self, float_ea_dtype, op):\n dtype = float_ea_dtype\n ser = Series([1.1, 2.2, 3.3], dtype=dtype)\n result = getattr(ser, op)()\n target = result.copy(deep=True)\n ser[0] = None\n tm.assert_series_equal(result, target)\n | .venv\Lib\site-packages\pandas\tests\series\test_unary.py | test_unary.py | Python | 1,620 | 0.95 | 0.12 | 0.04878 | node-utils | 206 | 2023-11-04T11:32:04.896265 | GPL-3.0 | true | f2e0c0ca6411b35bb9018ea493577de3 |
import pytest\n\n\n@pytest.mark.parametrize(\n "func",\n [\n "reset_index",\n "_set_name",\n "sort_values",\n "sort_index",\n "rename",\n "dropna",\n "drop_duplicates",\n ],\n)\n@pytest.mark.parametrize("inplace", [1, "True", [1, 2, 3], 5.0])\ndef test_validate_bool_args(string_series, func, inplace):\n """Tests for error handling related to data types of method arguments."""\n msg = 'For argument "inplace" expected type bool'\n kwargs = {"inplace": inplace}\n\n if func == "_set_name":\n kwargs["name"] = "hello"\n\n with pytest.raises(ValueError, match=msg):\n getattr(string_series, func)(**kwargs)\n | .venv\Lib\site-packages\pandas\tests\series\test_validate.py | test_validate.py | Python | 668 | 0.85 | 0.115385 | 0 | node-utils | 159 | 2024-01-05T20:18:36.491867 | BSD-3-Clause | true | 924a88ae3b2745a394ca493216aa6c13 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n Categorical,\n DataFrame,\n Index,\n Series,\n Timestamp,\n date_range,\n period_range,\n timedelta_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays.categorical import CategoricalAccessor\nfrom pandas.core.indexes.accessors import Properties\n\n\nclass TestCatAccessor:\n @pytest.mark.parametrize(\n "method",\n [\n lambda x: x.cat.set_categories([1, 2, 3]),\n lambda x: x.cat.reorder_categories([2, 3, 1], ordered=True),\n lambda x: x.cat.rename_categories([1, 2, 3]),\n lambda x: x.cat.remove_unused_categories(),\n lambda x: x.cat.remove_categories([2]),\n lambda x: x.cat.add_categories([4]),\n lambda x: x.cat.as_ordered(),\n lambda x: x.cat.as_unordered(),\n ],\n )\n def test_getname_categorical_accessor(self, method):\n # GH#17509\n ser = Series([1, 2, 3], name="A").astype("category")\n expected = "A"\n result = method(ser).name\n assert result == expected\n\n def test_cat_accessor(self):\n ser = Series(Categorical(["a", "b", np.nan, "a"]))\n tm.assert_index_equal(ser.cat.categories, Index(["a", "b"]))\n assert not ser.cat.ordered, False\n\n exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])\n\n res = ser.cat.set_categories(["b", "a"])\n tm.assert_categorical_equal(res.values, exp)\n\n ser[:] = "a"\n ser = ser.cat.remove_unused_categories()\n tm.assert_index_equal(ser.cat.categories, Index(["a"]))\n\n def test_cat_accessor_api(self):\n # GH#9322\n\n assert Series.cat is CategoricalAccessor\n ser = Series(list("aabbcde")).astype("category")\n assert isinstance(ser.cat, CategoricalAccessor)\n\n invalid = Series([1])\n with pytest.raises(AttributeError, match="only use .cat accessor"):\n invalid.cat\n assert not hasattr(invalid, "cat")\n\n def test_cat_accessor_no_new_attributes(self):\n # https://github.com/pandas-dev/pandas/issues/10673\n cat = Series(list("aabbcde")).astype("category")\n with pytest.raises(AttributeError, match="You cannot add any new attribute"):\n 
cat.cat.xlabel = "a"\n\n def test_categorical_delegations(self):\n # invalid accessor\n msg = r"Can only use \.cat accessor with a 'category' dtype"\n with pytest.raises(AttributeError, match=msg):\n Series([1, 2, 3]).cat\n with pytest.raises(AttributeError, match=msg):\n Series([1, 2, 3]).cat()\n with pytest.raises(AttributeError, match=msg):\n Series(["a", "b", "c"]).cat\n with pytest.raises(AttributeError, match=msg):\n Series(np.arange(5.0)).cat\n with pytest.raises(AttributeError, match=msg):\n Series([Timestamp("20130101")]).cat\n\n # Series should delegate calls to '.categories', '.codes', '.ordered'\n # and the methods '.set_categories()' 'drop_unused_categories()' to the\n # categorical\n ser = Series(Categorical(["a", "b", "c", "a"], ordered=True))\n exp_categories = Index(["a", "b", "c"])\n tm.assert_index_equal(ser.cat.categories, exp_categories)\n ser = ser.cat.rename_categories([1, 2, 3])\n exp_categories = Index([1, 2, 3])\n tm.assert_index_equal(ser.cat.categories, exp_categories)\n\n exp_codes = Series([0, 1, 2, 0], dtype="int8")\n tm.assert_series_equal(ser.cat.codes, exp_codes)\n\n assert ser.cat.ordered\n ser = ser.cat.as_unordered()\n assert not ser.cat.ordered\n\n ser = ser.cat.as_ordered()\n assert ser.cat.ordered\n\n # reorder\n ser = Series(Categorical(["a", "b", "c", "a"], ordered=True))\n exp_categories = Index(["c", "b", "a"])\n exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)\n ser = ser.cat.set_categories(["c", "b", "a"])\n tm.assert_index_equal(ser.cat.categories, exp_categories)\n tm.assert_numpy_array_equal(ser.values.__array__(), exp_values)\n tm.assert_numpy_array_equal(ser.__array__(), exp_values)\n\n # remove unused categories\n ser = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"]))\n exp_categories = Index(["a", "b"])\n exp_values = np.array(["a", "b", "b", "a"], dtype=np.object_)\n ser = ser.cat.remove_unused_categories()\n tm.assert_index_equal(ser.cat.categories, exp_categories)\n 
tm.assert_numpy_array_equal(ser.values.__array__(), exp_values)\n tm.assert_numpy_array_equal(ser.__array__(), exp_values)\n\n # This method is likely to be confused, so test that it raises an error\n # on wrong inputs:\n msg = "'Series' object has no attribute 'set_categories'"\n with pytest.raises(AttributeError, match=msg):\n ser.set_categories([4, 3, 2, 1])\n\n # right: ser.cat.set_categories([4,3,2,1])\n\n # GH#18862 (let Series.cat.rename_categories take callables)\n ser = Series(Categorical(["a", "b", "c", "a"], ordered=True))\n result = ser.cat.rename_categories(lambda x: x.upper())\n expected = Series(\n Categorical(["A", "B", "C", "A"], categories=["A", "B", "C"], ordered=True)\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "idx",\n [\n date_range("1/1/2015", periods=5),\n date_range("1/1/2015", periods=5, tz="MET"),\n period_range("1/1/2015", freq="D", periods=5),\n timedelta_range("1 days", "10 days"),\n ],\n )\n def test_dt_accessor_api_for_categorical(self, idx):\n # https://github.com/pandas-dev/pandas/issues/10661\n\n ser = Series(idx)\n cat = ser.astype("category")\n\n # only testing field (like .day)\n # and bool (is_month_start)\n attr_names = type(ser._values)._datetimelike_ops\n\n assert isinstance(cat.dt, Properties)\n\n special_func_defs = [\n ("strftime", ("%Y-%m-%d",), {}),\n ("round", ("D",), {}),\n ("floor", ("D",), {}),\n ("ceil", ("D",), {}),\n ("asfreq", ("D",), {}),\n ("as_unit", ("s"), {}),\n ]\n if idx.dtype == "M8[ns]":\n # exclude dt64tz since that is already localized and would raise\n tup = ("tz_localize", ("UTC",), {})\n special_func_defs.append(tup)\n elif idx.dtype.kind == "M":\n # exclude dt64 since that is not localized so would raise\n tup = ("tz_convert", ("EST",), {})\n special_func_defs.append(tup)\n\n _special_func_names = [f[0] for f in special_func_defs]\n\n _ignore_names = ["components", "tz_localize", "tz_convert"]\n\n func_names = [\n fname\n for fname in dir(ser.dt)\n if not (\n 
fname.startswith("_")\n or fname in attr_names\n or fname in _special_func_names\n or fname in _ignore_names\n )\n ]\n\n func_defs = [(fname, (), {}) for fname in func_names]\n func_defs.extend(\n f_def for f_def in special_func_defs if f_def[0] in dir(ser.dt)\n )\n\n for func, args, kwargs in func_defs:\n warn_cls = []\n if func == "to_period" and getattr(idx, "tz", None) is not None:\n # dropping TZ\n warn_cls.append(UserWarning)\n if func == "to_pydatetime":\n # deprecated to return Index[object]\n warn_cls.append(FutureWarning)\n if warn_cls:\n warn_cls = tuple(warn_cls)\n else:\n warn_cls = None\n with tm.assert_produces_warning(warn_cls):\n res = getattr(cat.dt, func)(*args, **kwargs)\n exp = getattr(ser.dt, func)(*args, **kwargs)\n\n tm.assert_equal(res, exp)\n\n for attr in attr_names:\n res = getattr(cat.dt, attr)\n exp = getattr(ser.dt, attr)\n\n tm.assert_equal(res, exp)\n\n def test_dt_accessor_api_for_categorical_invalid(self):\n invalid = Series([1, 2, 3]).astype("category")\n msg = "Can only use .dt accessor with datetimelike"\n\n with pytest.raises(AttributeError, match=msg):\n invalid.dt\n assert not hasattr(invalid, "str")\n\n def test_set_categories_setitem(self):\n # GH#43334\n\n df = DataFrame({"Survived": [1, 0, 1], "Sex": [0, 1, 1]}, dtype="category")\n\n df["Survived"] = df["Survived"].cat.rename_categories(["No", "Yes"])\n df["Sex"] = df["Sex"].cat.rename_categories(["female", "male"])\n\n # values should not be coerced to NaN\n assert list(df["Sex"]) == ["female", "male", "male"]\n assert list(df["Survived"]) == ["Yes", "No", "Yes"]\n\n df["Sex"] = Categorical(df["Sex"], categories=["female", "male"], ordered=False)\n df["Survived"] = Categorical(\n df["Survived"], categories=["No", "Yes"], ordered=False\n )\n\n # values should not be coerced to NaN\n assert list(df["Sex"]) == ["female", "male", "male"]\n assert list(df["Survived"]) == ["Yes", "No", "Yes"]\n\n def test_categorical_of_booleans_is_boolean(self):\n # 
https://github.com/pandas-dev/pandas/issues/46313\n df = DataFrame(\n {"int_cat": [1, 2, 3], "bool_cat": [True, False, False]}, dtype="category"\n )\n value = df["bool_cat"].cat.categories.dtype\n expected = np.dtype(np.bool_)\n assert value is expected\n | .venv\Lib\site-packages\pandas\tests\series\accessors\test_cat_accessor.py | test_cat_accessor.py | Python | 9,611 | 0.95 | 0.085271 | 0.111628 | react-lib | 747 | 2024-04-22T23:56:34.429325 | GPL-3.0 | true | 3cb36b6433f508ee8d902463acc30c5b |
import calendar\nfrom datetime import (\n date,\n datetime,\n time,\n)\nimport locale\nimport unicodedata\n\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslibs.timezones import maybe_get_tz\nfrom pandas.errors import SettingWithCopyError\n\nfrom pandas.core.dtypes.common import (\n is_integer_dtype,\n is_list_like,\n)\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Index,\n Period,\n PeriodIndex,\n Series,\n StringDtype,\n TimedeltaIndex,\n date_range,\n period_range,\n timedelta_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import (\n DatetimeArray,\n PeriodArray,\n TimedeltaArray,\n)\n\nok_for_period = PeriodArray._datetimelike_ops\nok_for_period_methods = ["strftime", "to_timestamp", "asfreq"]\nok_for_dt = DatetimeArray._datetimelike_ops\nok_for_dt_methods = [\n "to_period",\n "to_pydatetime",\n "tz_localize",\n "tz_convert",\n "normalize",\n "strftime",\n "round",\n "floor",\n "ceil",\n "day_name",\n "month_name",\n "isocalendar",\n "as_unit",\n]\nok_for_td = TimedeltaArray._datetimelike_ops\nok_for_td_methods = [\n "components",\n "to_pytimedelta",\n "total_seconds",\n "round",\n "floor",\n "ceil",\n "as_unit",\n]\n\n\ndef get_dir(ser):\n # check limited display api\n results = [r for r in ser.dt.__dir__() if not r.startswith("_")]\n return sorted(set(results))\n\n\nclass TestSeriesDatetimeValues:\n def _compare(self, ser, name):\n # GH 7207, 11128\n # test .dt namespace accessor\n\n def get_expected(ser, prop):\n result = getattr(Index(ser._values), prop)\n if isinstance(result, np.ndarray):\n if is_integer_dtype(result):\n result = result.astype("int64")\n elif not is_list_like(result) or isinstance(result, DataFrame):\n return result\n return Series(result, index=ser.index, name=ser.name)\n\n left = getattr(ser.dt, name)\n right = get_expected(ser, name)\n if not (is_list_like(left) and is_list_like(right)):\n assert left == right\n elif isinstance(left, DataFrame):\n 
tm.assert_frame_equal(left, right)\n else:\n tm.assert_series_equal(left, right)\n\n @pytest.mark.parametrize("freq", ["D", "s", "ms"])\n def test_dt_namespace_accessor_datetime64(self, freq):\n # GH#7207, GH#11128\n # test .dt namespace accessor\n\n # datetimeindex\n dti = date_range("20130101", periods=5, freq=freq)\n ser = Series(dti, name="xxx")\n\n for prop in ok_for_dt:\n # we test freq below\n if prop != "freq":\n self._compare(ser, prop)\n\n for prop in ok_for_dt_methods:\n getattr(ser.dt, prop)\n\n msg = "The behavior of DatetimeProperties.to_pydatetime is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = ser.dt.to_pydatetime()\n assert isinstance(result, np.ndarray)\n assert result.dtype == object\n\n result = ser.dt.tz_localize("US/Eastern")\n exp_values = DatetimeIndex(ser.values).tz_localize("US/Eastern")\n expected = Series(exp_values, index=ser.index, name="xxx")\n tm.assert_series_equal(result, expected)\n\n tz_result = result.dt.tz\n assert str(tz_result) == "US/Eastern"\n freq_result = ser.dt.freq\n assert freq_result == DatetimeIndex(ser.values, freq="infer").freq\n\n # let's localize, then convert\n result = ser.dt.tz_localize("UTC").dt.tz_convert("US/Eastern")\n exp_values = (\n DatetimeIndex(ser.values).tz_localize("UTC").tz_convert("US/Eastern")\n )\n expected = Series(exp_values, index=ser.index, name="xxx")\n tm.assert_series_equal(result, expected)\n\n def test_dt_namespace_accessor_datetime64tz(self):\n # GH#7207, GH#11128\n # test .dt namespace accessor\n\n # datetimeindex with tz\n dti = date_range("20130101", periods=5, tz="US/Eastern")\n ser = Series(dti, name="xxx")\n for prop in ok_for_dt:\n # we test freq below\n if prop != "freq":\n self._compare(ser, prop)\n\n for prop in ok_for_dt_methods:\n getattr(ser.dt, prop)\n\n msg = "The behavior of DatetimeProperties.to_pydatetime is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = ser.dt.to_pydatetime()\n assert 
isinstance(result, np.ndarray)\n assert result.dtype == object\n\n result = ser.dt.tz_convert("CET")\n expected = Series(ser._values.tz_convert("CET"), index=ser.index, name="xxx")\n tm.assert_series_equal(result, expected)\n\n tz_result = result.dt.tz\n assert str(tz_result) == "CET"\n freq_result = ser.dt.freq\n assert freq_result == DatetimeIndex(ser.values, freq="infer").freq\n\n def test_dt_namespace_accessor_timedelta(self):\n # GH#7207, GH#11128\n # test .dt namespace accessor\n\n # timedelta index\n cases = [\n Series(\n timedelta_range("1 day", periods=5), index=list("abcde"), name="xxx"\n ),\n Series(timedelta_range("1 day 01:23:45", periods=5, freq="s"), name="xxx"),\n Series(\n timedelta_range("2 days 01:23:45.012345", periods=5, freq="ms"),\n name="xxx",\n ),\n ]\n for ser in cases:\n for prop in ok_for_td:\n # we test freq below\n if prop != "freq":\n self._compare(ser, prop)\n\n for prop in ok_for_td_methods:\n getattr(ser.dt, prop)\n\n result = ser.dt.components\n assert isinstance(result, DataFrame)\n tm.assert_index_equal(result.index, ser.index)\n\n result = ser.dt.to_pytimedelta()\n assert isinstance(result, np.ndarray)\n assert result.dtype == object\n\n result = ser.dt.total_seconds()\n assert isinstance(result, Series)\n assert result.dtype == "float64"\n\n freq_result = ser.dt.freq\n assert freq_result == TimedeltaIndex(ser.values, freq="infer").freq\n\n def test_dt_namespace_accessor_period(self):\n # GH#7207, GH#11128\n # test .dt namespace accessor\n\n # periodindex\n pi = period_range("20130101", periods=5, freq="D")\n ser = Series(pi, name="xxx")\n\n for prop in ok_for_period:\n # we test freq below\n if prop != "freq":\n self._compare(ser, prop)\n\n for prop in ok_for_period_methods:\n getattr(ser.dt, prop)\n\n freq_result = ser.dt.freq\n assert freq_result == PeriodIndex(ser.values).freq\n\n def test_dt_namespace_accessor_index_and_values(self):\n # both\n index = date_range("20130101", periods=3, freq="D")\n dti = 
date_range("20140204", periods=3, freq="s")\n ser = Series(dti, index=index, name="xxx")\n exp = Series(\n np.array([2014, 2014, 2014], dtype="int32"), index=index, name="xxx"\n )\n tm.assert_series_equal(ser.dt.year, exp)\n\n exp = Series(np.array([2, 2, 2], dtype="int32"), index=index, name="xxx")\n tm.assert_series_equal(ser.dt.month, exp)\n\n exp = Series(np.array([0, 1, 2], dtype="int32"), index=index, name="xxx")\n tm.assert_series_equal(ser.dt.second, exp)\n\n exp = Series([ser.iloc[0]] * 3, index=index, name="xxx")\n tm.assert_series_equal(ser.dt.normalize(), exp)\n\n def test_dt_accessor_limited_display_api(self):\n # tznaive\n ser = Series(date_range("20130101", periods=5, freq="D"), name="xxx")\n results = get_dir(ser)\n tm.assert_almost_equal(results, sorted(set(ok_for_dt + ok_for_dt_methods)))\n\n # tzaware\n ser = Series(date_range("2015-01-01", "2016-01-01", freq="min"), name="xxx")\n ser = ser.dt.tz_localize("UTC").dt.tz_convert("America/Chicago")\n results = get_dir(ser)\n tm.assert_almost_equal(results, sorted(set(ok_for_dt + ok_for_dt_methods)))\n\n # Period\n idx = period_range("20130101", periods=5, freq="D", name="xxx").astype(object)\n with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):\n ser = Series(idx)\n results = get_dir(ser)\n tm.assert_almost_equal(\n results, sorted(set(ok_for_period + ok_for_period_methods))\n )\n\n def test_dt_accessor_ambiguous_freq_conversions(self):\n # GH#11295\n # ambiguous time error on the conversions\n ser = Series(date_range("2015-01-01", "2016-01-01", freq="min"), name="xxx")\n ser = ser.dt.tz_localize("UTC").dt.tz_convert("America/Chicago")\n\n exp_values = date_range(\n "2015-01-01", "2016-01-01", freq="min", tz="UTC"\n ).tz_convert("America/Chicago")\n # freq not preserved by tz_localize above\n exp_values = exp_values._with_freq(None)\n expected = Series(exp_values, name="xxx")\n tm.assert_series_equal(ser, expected)\n\n def test_dt_accessor_not_writeable(self, using_copy_on_write, 
warn_copy_on_write):\n # no setting allowed\n ser = Series(date_range("20130101", periods=5, freq="D"), name="xxx")\n with pytest.raises(ValueError, match="modifications"):\n ser.dt.hour = 5\n\n # trying to set a copy\n msg = "modifications to a property of a datetimelike.+not supported"\n with pd.option_context("chained_assignment", "raise"):\n if using_copy_on_write:\n with tm.raises_chained_assignment_error():\n ser.dt.hour[0] = 5\n elif warn_copy_on_write:\n with tm.assert_produces_warning(\n FutureWarning, match="ChainedAssignmentError"\n ):\n ser.dt.hour[0] = 5\n else:\n with pytest.raises(SettingWithCopyError, match=msg):\n ser.dt.hour[0] = 5\n\n @pytest.mark.parametrize(\n "method, dates",\n [\n ["round", ["2012-01-02", "2012-01-02", "2012-01-01"]],\n ["floor", ["2012-01-01", "2012-01-01", "2012-01-01"]],\n ["ceil", ["2012-01-02", "2012-01-02", "2012-01-02"]],\n ],\n )\n def test_dt_round(self, method, dates):\n # round\n ser = Series(\n pd.to_datetime(\n ["2012-01-01 13:00:00", "2012-01-01 12:01:00", "2012-01-01 08:00:00"]\n ),\n name="xxx",\n )\n result = getattr(ser.dt, method)("D")\n expected = Series(pd.to_datetime(dates), name="xxx")\n tm.assert_series_equal(result, expected)\n\n def test_dt_round_tz(self):\n ser = Series(\n pd.to_datetime(\n ["2012-01-01 13:00:00", "2012-01-01 12:01:00", "2012-01-01 08:00:00"]\n ),\n name="xxx",\n )\n result = ser.dt.tz_localize("UTC").dt.tz_convert("US/Eastern").dt.round("D")\n\n exp_values = pd.to_datetime(\n ["2012-01-01", "2012-01-01", "2012-01-01"]\n ).tz_localize("US/Eastern")\n expected = Series(exp_values, name="xxx")\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("method", ["ceil", "round", "floor"])\n def test_dt_round_tz_ambiguous(self, method):\n # GH 18946 round near "fall back" DST\n df1 = DataFrame(\n [\n pd.to_datetime("2017-10-29 02:00:00+02:00", utc=True),\n pd.to_datetime("2017-10-29 02:00:00+01:00", utc=True),\n pd.to_datetime("2017-10-29 03:00:00+01:00", utc=True),\n ],\n 
columns=["date"],\n )\n df1["date"] = df1["date"].dt.tz_convert("Europe/Madrid")\n # infer\n result = getattr(df1.date.dt, method)("h", ambiguous="infer")\n expected = df1["date"]\n tm.assert_series_equal(result, expected)\n\n # bool-array\n result = getattr(df1.date.dt, method)("h", ambiguous=[True, False, False])\n tm.assert_series_equal(result, expected)\n\n # NaT\n result = getattr(df1.date.dt, method)("h", ambiguous="NaT")\n expected = df1["date"].copy()\n expected.iloc[0:2] = pd.NaT\n tm.assert_series_equal(result, expected)\n\n # raise\n with tm.external_error_raised(pytz.AmbiguousTimeError):\n getattr(df1.date.dt, method)("h", ambiguous="raise")\n\n @pytest.mark.parametrize(\n "method, ts_str, freq",\n [\n ["ceil", "2018-03-11 01:59:00-0600", "5min"],\n ["round", "2018-03-11 01:59:00-0600", "5min"],\n ["floor", "2018-03-11 03:01:00-0500", "2h"],\n ],\n )\n def test_dt_round_tz_nonexistent(self, method, ts_str, freq):\n # GH 23324 round near "spring forward" DST\n ser = Series([pd.Timestamp(ts_str, tz="America/Chicago")])\n result = getattr(ser.dt, method)(freq, nonexistent="shift_forward")\n expected = Series([pd.Timestamp("2018-03-11 03:00:00", tz="America/Chicago")])\n tm.assert_series_equal(result, expected)\n\n result = getattr(ser.dt, method)(freq, nonexistent="NaT")\n expected = Series([pd.NaT]).dt.tz_localize(result.dt.tz)\n tm.assert_series_equal(result, expected)\n\n with pytest.raises(pytz.NonExistentTimeError, match="2018-03-11 02:00:00"):\n getattr(ser.dt, method)(freq, nonexistent="raise")\n\n @pytest.mark.parametrize("freq", ["ns", "us", "1000us"])\n def test_dt_round_nonnano_higher_resolution_no_op(self, freq):\n # GH 52761\n ser = Series(\n ["2020-05-31 08:00:00", "2000-12-31 04:00:05", "1800-03-14 07:30:20"],\n dtype="datetime64[ms]",\n )\n expected = ser.copy()\n result = ser.dt.round(freq)\n tm.assert_series_equal(result, expected)\n\n assert not np.shares_memory(ser.array._ndarray, result.array._ndarray)\n\n def 
test_dt_namespace_accessor_categorical(self):\n # GH 19468\n dti = DatetimeIndex(["20171111", "20181212"]).repeat(2)\n ser = Series(pd.Categorical(dti), name="foo")\n result = ser.dt.year\n expected = Series([2017, 2017, 2018, 2018], dtype="int32", name="foo")\n tm.assert_series_equal(result, expected)\n\n def test_dt_tz_localize_categorical(self, tz_aware_fixture):\n # GH 27952\n tz = tz_aware_fixture\n datetimes = Series(\n ["2019-01-01", "2019-01-01", "2019-01-02"], dtype="datetime64[ns]"\n )\n categorical = datetimes.astype("category")\n result = categorical.dt.tz_localize(tz)\n expected = datetimes.dt.tz_localize(tz)\n tm.assert_series_equal(result, expected)\n\n def test_dt_tz_convert_categorical(self, tz_aware_fixture):\n # GH 27952\n tz = tz_aware_fixture\n datetimes = Series(\n ["2019-01-01", "2019-01-01", "2019-01-02"], dtype="datetime64[ns, MET]"\n )\n categorical = datetimes.astype("category")\n result = categorical.dt.tz_convert(tz)\n expected = datetimes.dt.tz_convert(tz)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("accessor", ["year", "month", "day"])\n def test_dt_other_accessors_categorical(self, accessor):\n # GH 27952\n datetimes = Series(\n ["2018-01-01", "2018-01-01", "2019-01-02"], dtype="datetime64[ns]"\n )\n categorical = datetimes.astype("category")\n result = getattr(categorical.dt, accessor)\n expected = getattr(datetimes.dt, accessor)\n tm.assert_series_equal(result, expected)\n\n def test_dt_accessor_no_new_attributes(self):\n # https://github.com/pandas-dev/pandas/issues/10673\n ser = Series(date_range("20130101", periods=5, freq="D"))\n with pytest.raises(AttributeError, match="You cannot add any new attribute"):\n ser.dt.xlabel = "a"\n\n # error: Unsupported operand types for + ("List[None]" and "List[str]")\n @pytest.mark.parametrize(\n "time_locale", [None] + tm.get_locales() # type: ignore[operator]\n )\n def test_dt_accessor_datetime_name_accessors(self, time_locale):\n # Test Monday -> Sunday and 
January -> December, in that sequence\n if time_locale is None:\n # If the time_locale is None, day-name and month_name should\n # return the english attributes\n expected_days = [\n "Monday",\n "Tuesday",\n "Wednesday",\n "Thursday",\n "Friday",\n "Saturday",\n "Sunday",\n ]\n expected_months = [\n "January",\n "February",\n "March",\n "April",\n "May",\n "June",\n "July",\n "August",\n "September",\n "October",\n "November",\n "December",\n ]\n else:\n with tm.set_locale(time_locale, locale.LC_TIME):\n expected_days = calendar.day_name[:]\n expected_months = calendar.month_name[1:]\n\n ser = Series(date_range(freq="D", start=datetime(1998, 1, 1), periods=365))\n english_days = [\n "Monday",\n "Tuesday",\n "Wednesday",\n "Thursday",\n "Friday",\n "Saturday",\n "Sunday",\n ]\n for day, name, eng_name in zip(range(4, 11), expected_days, english_days):\n name = name.capitalize()\n assert ser.dt.day_name(locale=time_locale)[day] == name\n assert ser.dt.day_name(locale=None)[day] == eng_name\n ser = pd.concat([ser, Series([pd.NaT])])\n assert np.isnan(ser.dt.day_name(locale=time_locale).iloc[-1])\n\n ser = Series(date_range(freq="ME", start="2012", end="2013"))\n result = ser.dt.month_name(locale=time_locale)\n expected = Series([month.capitalize() for month in expected_months])\n\n # work around https://github.com/pandas-dev/pandas/issues/22342\n result = result.str.normalize("NFD")\n expected = expected.str.normalize("NFD")\n\n tm.assert_series_equal(result, expected)\n\n for s_date, expected in zip(ser, expected_months):\n result = s_date.month_name(locale=time_locale)\n expected = expected.capitalize()\n\n result = unicodedata.normalize("NFD", result)\n expected = unicodedata.normalize("NFD", expected)\n\n assert result == expected\n\n ser = pd.concat([ser, Series([pd.NaT])])\n assert np.isnan(ser.dt.month_name(locale=time_locale).iloc[-1])\n\n def test_strftime(self):\n # GH 10086\n ser = Series(date_range("20130101", periods=5))\n result = 
ser.dt.strftime("%Y/%m/%d")\n expected = Series(\n ["2013/01/01", "2013/01/02", "2013/01/03", "2013/01/04", "2013/01/05"]\n )\n tm.assert_series_equal(result, expected)\n\n ser = Series(date_range("2015-02-03 11:22:33.4567", periods=5))\n result = ser.dt.strftime("%Y/%m/%d %H-%M-%S")\n expected = Series(\n [\n "2015/02/03 11-22-33",\n "2015/02/04 11-22-33",\n "2015/02/05 11-22-33",\n "2015/02/06 11-22-33",\n "2015/02/07 11-22-33",\n ]\n )\n tm.assert_series_equal(result, expected)\n\n ser = Series(period_range("20130101", periods=5))\n result = ser.dt.strftime("%Y/%m/%d")\n expected = Series(\n ["2013/01/01", "2013/01/02", "2013/01/03", "2013/01/04", "2013/01/05"]\n )\n tm.assert_series_equal(result, expected)\n\n ser = Series(period_range("2015-02-03 11:22:33.4567", periods=5, freq="s"))\n result = ser.dt.strftime("%Y/%m/%d %H-%M-%S")\n expected = Series(\n [\n "2015/02/03 11-22-33",\n "2015/02/03 11-22-34",\n "2015/02/03 11-22-35",\n "2015/02/03 11-22-36",\n "2015/02/03 11-22-37",\n ]\n )\n tm.assert_series_equal(result, expected)\n\n def test_strftime_dt64_days(self):\n ser = Series(date_range("20130101", periods=5))\n ser.iloc[0] = pd.NaT\n result = ser.dt.strftime("%Y/%m/%d")\n expected = Series(\n [np.nan, "2013/01/02", "2013/01/03", "2013/01/04", "2013/01/05"]\n )\n tm.assert_series_equal(result, expected)\n\n datetime_index = date_range("20150301", periods=5)\n result = datetime_index.strftime("%Y/%m/%d")\n\n expected = Index(\n ["2015/03/01", "2015/03/02", "2015/03/03", "2015/03/04", "2015/03/05"],\n )\n # dtype may be S10 or U10 depending on python version\n tm.assert_index_equal(result, expected)\n\n def test_strftime_period_days(self, using_infer_string):\n period_index = period_range("20150301", periods=5)\n result = period_index.strftime("%Y/%m/%d")\n expected = Index(\n ["2015/03/01", "2015/03/02", "2015/03/03", "2015/03/04", "2015/03/05"],\n dtype="=U10",\n )\n if using_infer_string:\n expected = expected.astype(StringDtype(na_value=np.nan))\n 
tm.assert_index_equal(result, expected)\n\n def test_strftime_dt64_microsecond_resolution(self):\n ser = Series([datetime(2013, 1, 1, 2, 32, 59), datetime(2013, 1, 2, 14, 32, 1)])\n result = ser.dt.strftime("%Y-%m-%d %H:%M:%S")\n expected = Series(["2013-01-01 02:32:59", "2013-01-02 14:32:01"])\n tm.assert_series_equal(result, expected)\n\n def test_strftime_period_hours(self):\n ser = Series(period_range("20130101", periods=4, freq="h"))\n result = ser.dt.strftime("%Y/%m/%d %H:%M:%S")\n expected = Series(\n [\n "2013/01/01 00:00:00",\n "2013/01/01 01:00:00",\n "2013/01/01 02:00:00",\n "2013/01/01 03:00:00",\n ]\n )\n tm.assert_series_equal(result, expected)\n\n def test_strftime_period_minutes(self):\n ser = Series(period_range("20130101", periods=4, freq="ms"))\n result = ser.dt.strftime("%Y/%m/%d %H:%M:%S.%l")\n expected = Series(\n [\n "2013/01/01 00:00:00.000",\n "2013/01/01 00:00:00.001",\n "2013/01/01 00:00:00.002",\n "2013/01/01 00:00:00.003",\n ]\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "data",\n [\n DatetimeIndex(["2019-01-01", pd.NaT]),\n PeriodIndex(["2019-01-01", pd.NaT], dtype="period[D]"),\n ],\n )\n def test_strftime_nat(self, data):\n # GH 29578\n ser = Series(data)\n result = ser.dt.strftime("%Y-%m-%d")\n expected = Series(["2019-01-01", np.nan])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "data", [DatetimeIndex([pd.NaT]), PeriodIndex([pd.NaT], dtype="period[D]")]\n )\n def test_strftime_all_nat(self, data):\n # https://github.com/pandas-dev/pandas/issues/45858\n ser = Series(data)\n with tm.assert_produces_warning(None):\n result = ser.dt.strftime("%Y-%m-%d")\n expected = Series([np.nan], dtype="str")\n tm.assert_series_equal(result, expected)\n\n def test_valid_dt_with_missing_values(self):\n # GH 8689\n ser = Series(date_range("20130101", periods=5, freq="D"))\n ser.iloc[2] = pd.NaT\n\n for attr in ["microsecond", "nanosecond", "second", "minute", "hour", "day"]:\n expected = 
getattr(ser.dt, attr).copy()\n expected.iloc[2] = np.nan\n result = getattr(ser.dt, attr)\n tm.assert_series_equal(result, expected)\n\n result = ser.dt.date\n expected = Series(\n [\n date(2013, 1, 1),\n date(2013, 1, 2),\n pd.NaT,\n date(2013, 1, 4),\n date(2013, 1, 5),\n ],\n dtype="object",\n )\n tm.assert_series_equal(result, expected)\n\n result = ser.dt.time\n expected = Series([time(0), time(0), pd.NaT, time(0), time(0)], dtype="object")\n tm.assert_series_equal(result, expected)\n\n def test_dt_accessor_api(self):\n # GH 9322\n from pandas.core.indexes.accessors import (\n CombinedDatetimelikeProperties,\n DatetimeProperties,\n )\n\n assert Series.dt is CombinedDatetimelikeProperties\n\n ser = Series(date_range("2000-01-01", periods=3))\n assert isinstance(ser.dt, DatetimeProperties)\n\n @pytest.mark.parametrize(\n "ser",\n [\n Series(np.arange(5)),\n Series(list("abcde")),\n Series(np.random.default_rng(2).standard_normal(5)),\n ],\n )\n def test_dt_accessor_invalid(self, ser):\n # GH#9322 check that series with incorrect dtypes don't have attr\n with pytest.raises(AttributeError, match="only use .dt accessor"):\n ser.dt\n assert not hasattr(ser, "dt")\n\n def test_dt_accessor_updates_on_inplace(self):\n ser = Series(date_range("2018-01-01", periods=10))\n ser[2] = None\n return_value = ser.fillna(pd.Timestamp("2018-01-01"), inplace=True)\n assert return_value is None\n result = ser.dt.date\n assert result[0] == result[2]\n\n def test_date_tz(self):\n # GH11757\n rng = DatetimeIndex(\n ["2014-04-04 23:56", "2014-07-18 21:24", "2015-11-22 22:14"],\n tz="US/Eastern",\n )\n ser = Series(rng)\n expected = Series([date(2014, 4, 4), date(2014, 7, 18), date(2015, 11, 22)])\n tm.assert_series_equal(ser.dt.date, expected)\n tm.assert_series_equal(ser.apply(lambda x: x.date()), expected)\n\n def test_dt_timetz_accessor(self, tz_naive_fixture):\n # GH21358\n tz = maybe_get_tz(tz_naive_fixture)\n\n dtindex = DatetimeIndex(\n ["2014-04-04 23:56", "2014-07-18 21:24", 
"2015-11-22 22:14"], tz=tz\n )\n ser = Series(dtindex)\n expected = Series(\n [time(23, 56, tzinfo=tz), time(21, 24, tzinfo=tz), time(22, 14, tzinfo=tz)]\n )\n result = ser.dt.timetz\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "input_series, expected_output",\n [\n [["2020-01-01"], [[2020, 1, 3]]],\n [[pd.NaT], [[np.nan, np.nan, np.nan]]],\n [["2019-12-31", "2019-12-29"], [[2020, 1, 2], [2019, 52, 7]]],\n [["2010-01-01", pd.NaT], [[2009, 53, 5], [np.nan, np.nan, np.nan]]],\n # see GH#36032\n [["2016-01-08", "2016-01-04"], [[2016, 1, 5], [2016, 1, 1]]],\n [["2016-01-07", "2016-01-01"], [[2016, 1, 4], [2015, 53, 5]]],\n ],\n )\n def test_isocalendar(self, input_series, expected_output):\n result = pd.to_datetime(Series(input_series)).dt.isocalendar()\n expected_frame = DataFrame(\n expected_output, columns=["year", "week", "day"], dtype="UInt32"\n )\n tm.assert_frame_equal(result, expected_frame)\n\n def test_hour_index(self):\n dt_series = Series(\n date_range(start="2021-01-01", periods=5, freq="h"),\n index=[2, 6, 7, 8, 11],\n dtype="category",\n )\n result = dt_series.dt.hour\n expected = Series(\n [0, 1, 2, 3, 4],\n dtype="int32",\n index=[2, 6, 7, 8, 11],\n )\n tm.assert_series_equal(result, expected)\n\n\nclass TestSeriesPeriodValuesDtAccessor:\n @pytest.mark.parametrize(\n "input_vals",\n [\n [Period("2016-01", freq="M"), Period("2016-02", freq="M")],\n [Period("2016-01-01", freq="D"), Period("2016-01-02", freq="D")],\n [\n Period("2016-01-01 00:00:00", freq="h"),\n Period("2016-01-01 01:00:00", freq="h"),\n ],\n [\n Period("2016-01-01 00:00:00", freq="M"),\n Period("2016-01-01 00:01:00", freq="M"),\n ],\n [\n Period("2016-01-01 00:00:00", freq="s"),\n Period("2016-01-01 00:00:01", freq="s"),\n ],\n ],\n )\n def test_end_time_timevalues(self, input_vals):\n # GH#17157\n # Check that the time part of the Period is adjusted by end_time\n # when using the dt accessor on a Series\n input_vals = 
PeriodArray._from_sequence(np.asarray(input_vals))\n\n ser = Series(input_vals)\n result = ser.dt.end_time\n expected = ser.apply(lambda x: x.end_time)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("input_vals", [("2001"), ("NaT")])\n def test_to_period(self, input_vals):\n # GH#21205\n expected = Series([input_vals], dtype="Period[D]")\n result = Series([input_vals], dtype="datetime64[ns]").dt.to_period("D")\n tm.assert_series_equal(result, expected)\n\n\ndef test_normalize_pre_epoch_dates():\n # GH: 36294\n ser = pd.to_datetime(Series(["1969-01-01 09:00:00", "2016-01-01 09:00:00"]))\n result = ser.dt.normalize()\n expected = pd.to_datetime(Series(["1969-01-01", "2016-01-01"]))\n tm.assert_series_equal(result, expected)\n\n\ndef test_day_attribute_non_nano_beyond_int32():\n # GH 52386\n data = np.array(\n [\n 136457654736252,\n 134736784364431,\n 245345345545332,\n 223432411,\n 2343241,\n 3634548734,\n 23234,\n ],\n dtype="timedelta64[s]",\n )\n ser = Series(data)\n result = ser.dt.days\n expected = Series([1579371003, 1559453522, 2839645203, 2586, 27, 42066, 0])\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\accessors\test_dt_accessor.py | test_dt_accessor.py | Python | 29,877 | 0.95 | 0.083037 | 0.085831 | node-utils | 769 | 2025-02-08T14:57:15.269715 | GPL-3.0 | true | 10e20d6bd0ae1eec9a64e9cd69137ec4 |
import re\n\nimport pytest\n\nfrom pandas import (\n ArrowDtype,\n Series,\n)\nimport pandas._testing as tm\n\npa = pytest.importorskip("pyarrow")\n\nfrom pandas.compat import pa_version_under11p0\n\n\n@pytest.mark.parametrize(\n "list_dtype",\n (\n pa.list_(pa.int64()),\n pa.list_(pa.int64(), list_size=3),\n pa.large_list(pa.int64()),\n ),\n)\ndef test_list_getitem(list_dtype):\n ser = Series(\n [[1, 2, 3], [4, None, 5], None],\n dtype=ArrowDtype(list_dtype),\n )\n actual = ser.list[1]\n expected = Series([2, None, None], dtype="int64[pyarrow]")\n tm.assert_series_equal(actual, expected)\n\n\ndef test_list_getitem_slice():\n ser = Series(\n [[1, 2, 3], [4, None, 5], None],\n dtype=ArrowDtype(pa.list_(pa.int64())),\n )\n if pa_version_under11p0:\n with pytest.raises(\n NotImplementedError, match="List slice not supported by pyarrow "\n ):\n ser.list[1:None:None]\n else:\n actual = ser.list[1:None:None]\n expected = Series(\n [[2, 3], [None, 5], None], dtype=ArrowDtype(pa.list_(pa.int64()))\n )\n tm.assert_series_equal(actual, expected)\n\n\ndef test_list_len():\n ser = Series(\n [[1, 2, 3], [4, None], None],\n dtype=ArrowDtype(pa.list_(pa.int64())),\n )\n actual = ser.list.len()\n expected = Series([3, 2, None], dtype=ArrowDtype(pa.int32()))\n tm.assert_series_equal(actual, expected)\n\n\ndef test_list_flatten():\n ser = Series(\n [[1, 2, 3], [4, None], None],\n dtype=ArrowDtype(pa.list_(pa.int64())),\n )\n actual = ser.list.flatten()\n expected = Series([1, 2, 3, 4, None], dtype=ArrowDtype(pa.int64()))\n tm.assert_series_equal(actual, expected)\n\n\ndef test_list_getitem_slice_invalid():\n ser = Series(\n [[1, 2, 3], [4, None, 5], None],\n dtype=ArrowDtype(pa.list_(pa.int64())),\n )\n if pa_version_under11p0:\n with pytest.raises(\n NotImplementedError, match="List slice not supported by pyarrow "\n ):\n ser.list[1:None:0]\n else:\n with pytest.raises(pa.lib.ArrowInvalid, match=re.escape("`step` must be >= 1")):\n ser.list[1:None:0]\n\n\ndef 
test_list_accessor_non_list_dtype():\n ser = Series(\n [1, 2, 4],\n dtype=ArrowDtype(pa.int64()),\n )\n with pytest.raises(\n AttributeError,\n match=re.escape(\n "Can only use the '.list' accessor with 'list[pyarrow]' dtype, "\n "not int64[pyarrow]."\n ),\n ):\n ser.list[1:None:0]\n\n\n@pytest.mark.parametrize(\n "list_dtype",\n (\n pa.list_(pa.int64()),\n pa.list_(pa.int64(), list_size=3),\n pa.large_list(pa.int64()),\n ),\n)\ndef test_list_getitem_invalid_index(list_dtype):\n ser = Series(\n [[1, 2, 3], [4, None, 5], None],\n dtype=ArrowDtype(list_dtype),\n )\n with pytest.raises(pa.lib.ArrowInvalid, match="Index -1 is out of bounds"):\n ser.list[-1]\n with pytest.raises(pa.lib.ArrowInvalid, match="Index 5 is out of bounds"):\n ser.list[5]\n with pytest.raises(ValueError, match="key must be an int or slice, got str"):\n ser.list["abc"]\n\n\ndef test_list_accessor_not_iterable():\n ser = Series(\n [[1, 2, 3], [4, None], None],\n dtype=ArrowDtype(pa.list_(pa.int64())),\n )\n with pytest.raises(TypeError, match="'ListAccessor' object is not iterable"):\n iter(ser.list)\n | .venv\Lib\site-packages\pandas\tests\series\accessors\test_list_accessor.py | test_list_accessor.py | Python | 3,425 | 0.85 | 0.077519 | 0 | python-kit | 426 | 2025-03-06T15:20:22.585998 | BSD-3-Clause | true | 1aa8d8e99eb883172e89b3bf8dbeb6e7 |
from pandas import Series\n\n\nclass TestSparseAccessor:\n def test_sparse_accessor_updates_on_inplace(self):\n ser = Series([1, 1, 2, 3], dtype="Sparse[int]")\n return_value = ser.drop([0, 1], inplace=True)\n assert return_value is None\n assert ser.sparse.density == 1.0\n | .venv\Lib\site-packages\pandas\tests\series\accessors\test_sparse_accessor.py | test_sparse_accessor.py | Python | 296 | 0.85 | 0.222222 | 0 | vue-tools | 17 | 2024-08-11T13:01:11.828404 | MIT | true | 533dca04c63671ac2323bdc734eee52c |
import re\n\nimport pytest\n\nfrom pandas.compat.pyarrow import (\n pa_version_under11p0,\n pa_version_under13p0,\n)\n\nfrom pandas import (\n ArrowDtype,\n DataFrame,\n Index,\n Series,\n)\nimport pandas._testing as tm\n\npa = pytest.importorskip("pyarrow")\npc = pytest.importorskip("pyarrow.compute")\n\n\ndef test_struct_accessor_dtypes():\n ser = Series(\n [],\n dtype=ArrowDtype(\n pa.struct(\n [\n ("int_col", pa.int64()),\n ("string_col", pa.string()),\n (\n "struct_col",\n pa.struct(\n [\n ("int_col", pa.int64()),\n ("float_col", pa.float64()),\n ]\n ),\n ),\n ]\n )\n ),\n )\n actual = ser.struct.dtypes\n expected = Series(\n [\n ArrowDtype(pa.int64()),\n ArrowDtype(pa.string()),\n ArrowDtype(\n pa.struct(\n [\n ("int_col", pa.int64()),\n ("float_col", pa.float64()),\n ]\n )\n ),\n ],\n index=Index(["int_col", "string_col", "struct_col"]),\n )\n tm.assert_series_equal(actual, expected)\n\n\n@pytest.mark.skipif(pa_version_under13p0, reason="pyarrow>=13.0.0 required")\ndef test_struct_accessor_field():\n index = Index([-100, 42, 123])\n ser = Series(\n [\n {"rice": 1.0, "maize": -1, "wheat": "a"},\n {"rice": 2.0, "maize": 0, "wheat": "b"},\n {"rice": 3.0, "maize": 1, "wheat": "c"},\n ],\n dtype=ArrowDtype(\n pa.struct(\n [\n ("rice", pa.float64()),\n ("maize", pa.int64()),\n ("wheat", pa.string()),\n ]\n )\n ),\n index=index,\n )\n by_name = ser.struct.field("maize")\n by_name_expected = Series(\n [-1, 0, 1],\n dtype=ArrowDtype(pa.int64()),\n index=index,\n name="maize",\n )\n tm.assert_series_equal(by_name, by_name_expected)\n\n by_index = ser.struct.field(2)\n by_index_expected = Series(\n ["a", "b", "c"],\n dtype=ArrowDtype(pa.string()),\n index=index,\n name="wheat",\n )\n tm.assert_series_equal(by_index, by_index_expected)\n\n\ndef test_struct_accessor_field_with_invalid_name_or_index():\n ser = Series([], dtype=ArrowDtype(pa.struct([("field", pa.int64())])))\n\n with pytest.raises(ValueError, match="name_or_index must be an int, str,"):\n 
ser.struct.field(1.1)\n\n\n@pytest.mark.skipif(pa_version_under11p0, reason="pyarrow>=11.0.0 required")\ndef test_struct_accessor_explode():\n index = Index([-100, 42, 123])\n ser = Series(\n [\n {"painted": 1, "snapping": {"sea": "green"}},\n {"painted": 2, "snapping": {"sea": "leatherback"}},\n {"painted": 3, "snapping": {"sea": "hawksbill"}},\n ],\n dtype=ArrowDtype(\n pa.struct(\n [\n ("painted", pa.int64()),\n ("snapping", pa.struct([("sea", pa.string())])),\n ]\n )\n ),\n index=index,\n )\n actual = ser.struct.explode()\n expected = DataFrame(\n {\n "painted": Series([1, 2, 3], index=index, dtype=ArrowDtype(pa.int64())),\n "snapping": Series(\n [{"sea": "green"}, {"sea": "leatherback"}, {"sea": "hawksbill"}],\n index=index,\n dtype=ArrowDtype(pa.struct([("sea", pa.string())])),\n ),\n },\n )\n tm.assert_frame_equal(actual, expected)\n\n\n@pytest.mark.parametrize(\n "invalid",\n [\n pytest.param(Series([1, 2, 3], dtype="int64"), id="int64"),\n pytest.param(\n Series(["a", "b", "c"], dtype="string[pyarrow]"), id="string-pyarrow"\n ),\n ],\n)\ndef test_struct_accessor_api_for_invalid(invalid):\n with pytest.raises(\n AttributeError,\n match=re.escape(\n "Can only use the '.struct' accessor with 'struct[pyarrow]' dtype, "\n f"not {invalid.dtype}."\n ),\n ):\n invalid.struct\n\n\n@pytest.mark.parametrize(\n ["indices", "name"],\n [\n (0, "int_col"),\n ([1, 2], "str_col"),\n (pc.field("int_col"), "int_col"),\n ("int_col", "int_col"),\n (b"string_col", b"string_col"),\n ([b"string_col"], "string_col"),\n ],\n)\n@pytest.mark.skipif(pa_version_under13p0, reason="pyarrow>=13.0.0 required")\ndef test_struct_accessor_field_expanded(indices, name):\n arrow_type = pa.struct(\n [\n ("int_col", pa.int64()),\n (\n "struct_col",\n pa.struct(\n [\n ("int_col", pa.int64()),\n ("float_col", pa.float64()),\n ("str_col", pa.string()),\n ]\n ),\n ),\n (b"string_col", pa.string()),\n ]\n )\n\n data = pa.array([], type=arrow_type)\n ser = Series(data, dtype=ArrowDtype(arrow_type))\n 
expected = pc.struct_field(data, indices)\n result = ser.struct.field(indices)\n tm.assert_equal(result.array._pa_array.combine_chunks(), expected)\n assert result.name == name\n | .venv\Lib\site-packages\pandas\tests\series\accessors\test_struct_accessor.py | test_struct_accessor.py | Python | 5,443 | 0.85 | 0.030612 | 0 | vue-tools | 813 | 2025-05-27T07:41:51.601821 | MIT | true | a2ad97cdf9b43bfe0826e22757b6f323 |
import pytest\n\nfrom pandas import Series\nimport pandas._testing as tm\n\n\nclass TestStrAccessor:\n def test_str_attribute(self):\n # GH#9068\n methods = ["strip", "rstrip", "lstrip"]\n ser = Series([" jack", "jill ", " jesse ", "frank"])\n for method in methods:\n expected = Series([getattr(str, method)(x) for x in ser.values])\n tm.assert_series_equal(getattr(Series.str, method)(ser.str), expected)\n\n # str accessor only valid with string values\n ser = Series(range(5))\n with pytest.raises(AttributeError, match="only use .str accessor"):\n ser.str.repeat(2)\n\n def test_str_accessor_updates_on_inplace(self):\n ser = Series(list("abc"))\n return_value = ser.drop([0], inplace=True)\n assert return_value is None\n assert len(ser.str.lower()) == 2\n | .venv\Lib\site-packages\pandas\tests\series\accessors\test_str_accessor.py | test_str_accessor.py | Python | 853 | 0.95 | 0.2 | 0.1 | awesome-app | 632 | 2025-04-14T01:12:47.478157 | MIT | true | 9be8ffb2151a443290256d4061d60e79 |
\n\n | .venv\Lib\site-packages\pandas\tests\series\accessors\__pycache__\test_cat_accessor.cpython-313.pyc | test_cat_accessor.cpython-313.pyc | Other | 15,681 | 0.8 | 0 | 0 | awesome-app | 751 | 2024-12-21T21:14:54.970619 | BSD-3-Clause | true | 3a8af3bcdf003aa6deea26b49487fce4 |
\n\n | .venv\Lib\site-packages\pandas\tests\series\accessors\__pycache__\test_dt_accessor.cpython-313.pyc | test_dt_accessor.cpython-313.pyc | Other | 43,339 | 0.8 | 0 | 0.007042 | vue-tools | 193 | 2024-06-19T19:48:41.869131 | Apache-2.0 | true | bba163570adcb50fd9f642463ce16f81 |
\n\n | .venv\Lib\site-packages\pandas\tests\series\accessors\__pycache__\test_list_accessor.cpython-313.pyc | test_list_accessor.cpython-313.pyc | Other | 7,187 | 0.95 | 0 | 0 | python-kit | 969 | 2024-01-01T03:03:36.367959 | Apache-2.0 | true | 7544620a3182e7fa7f2506665712da41 |
\n\n | .venv\Lib\site-packages\pandas\tests\series\accessors\__pycache__\test_sparse_accessor.cpython-313.pyc | test_sparse_accessor.cpython-313.pyc | Other | 1,001 | 0.8 | 0 | 0 | node-utils | 869 | 2024-10-25T02:01:01.775373 | MIT | true | e087c33efc4f06de1a04aece71139b6d |
\n\n | .venv\Lib\site-packages\pandas\tests\series\accessors\__pycache__\test_struct_accessor.cpython-313.pyc | test_struct_accessor.cpython-313.pyc | Other | 7,780 | 0.95 | 0 | 0.011905 | react-lib | 826 | 2024-09-28T14:17:56.094699 | GPL-3.0 | true | 991fc6210439d94187021398e810afaa |
\n\n | .venv\Lib\site-packages\pandas\tests\series\accessors\__pycache__\test_str_accessor.cpython-313.pyc | test_str_accessor.cpython-313.pyc | Other | 2,124 | 0.8 | 0 | 0 | vue-tools | 936 | 2025-05-18T11:43:36.313432 | BSD-3-Clause | true | 6bc9ef8d0904115794d64b808f62fbb9 |
\n\n | .venv\Lib\site-packages\pandas\tests\series\accessors\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 204 | 0.7 | 0 | 0 | python-kit | 535 | 2024-08-18T05:58:35.642142 | BSD-3-Clause | true | 615a6d0dce0f518dbe26866847605d30 |
"""\nAlso test support for datetime64[ns] in Series / DataFrame\n"""\nfrom datetime import (\n datetime,\n timedelta,\n)\nimport re\n\nfrom dateutil.tz import (\n gettz,\n tzutc,\n)\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs import index as libindex\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Series,\n Timestamp,\n date_range,\n period_range,\n)\nimport pandas._testing as tm\n\n\ndef test_fancy_getitem():\n dti = date_range(\n freq="WOM-1FRI", start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)\n )\n\n s = Series(np.arange(len(dti)), index=dti)\n\n msg = "Series.__getitem__ treating keys as positions is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n assert s[48] == 48\n assert s["1/2/2009"] == 48\n assert s["2009-1-2"] == 48\n assert s[datetime(2009, 1, 2)] == 48\n assert s[Timestamp(datetime(2009, 1, 2))] == 48\n with pytest.raises(KeyError, match=r"^'2009-1-3'$"):\n s["2009-1-3"]\n tm.assert_series_equal(\n s["3/6/2009":"2009-06-05"], s[datetime(2009, 3, 6) : datetime(2009, 6, 5)]\n )\n\n\ndef test_fancy_setitem():\n dti = date_range(\n freq="WOM-1FRI", start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)\n )\n\n s = Series(np.arange(len(dti)), index=dti)\n\n msg = "Series.__setitem__ treating keys as positions is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n s[48] = -1\n assert s.iloc[48] == -1\n s["1/2/2009"] = -2\n assert s.iloc[48] == -2\n s["1/2/2009":"2009-06-05"] = -3\n assert (s[48:54] == -3).all()\n\n\n@pytest.mark.parametrize("tz_source", ["pytz", "dateutil"])\ndef test_getitem_setitem_datetime_tz(tz_source):\n if tz_source == "pytz":\n tzget = pytz.timezone\n else:\n # handle special case for utc in dateutil\n tzget = lambda x: tzutc() if x == "UTC" else gettz(x)\n\n N = 50\n # testing with timezone, GH #2785\n rng = date_range("1/1/1990", periods=N, freq="h", tz=tzget("US/Eastern"))\n ts = Series(np.random.default_rng(2).standard_normal(N), 
index=rng)\n\n # also test Timestamp tz handling, GH #2789\n result = ts.copy()\n result["1990-01-01 09:00:00+00:00"] = 0\n result["1990-01-01 09:00:00+00:00"] = ts.iloc[4]\n tm.assert_series_equal(result, ts)\n\n result = ts.copy()\n result["1990-01-01 03:00:00-06:00"] = 0\n result["1990-01-01 03:00:00-06:00"] = ts.iloc[4]\n tm.assert_series_equal(result, ts)\n\n # repeat with datetimes\n result = ts.copy()\n result[datetime(1990, 1, 1, 9, tzinfo=tzget("UTC"))] = 0\n result[datetime(1990, 1, 1, 9, tzinfo=tzget("UTC"))] = ts.iloc[4]\n tm.assert_series_equal(result, ts)\n\n result = ts.copy()\n dt = Timestamp(1990, 1, 1, 3).tz_localize(tzget("US/Central"))\n dt = dt.to_pydatetime()\n result[dt] = 0\n result[dt] = ts.iloc[4]\n tm.assert_series_equal(result, ts)\n\n\ndef test_getitem_setitem_datetimeindex():\n N = 50\n # testing with timezone, GH #2785\n rng = date_range("1/1/1990", periods=N, freq="h", tz="US/Eastern")\n ts = Series(np.random.default_rng(2).standard_normal(N), index=rng)\n\n result = ts["1990-01-01 04:00:00"]\n expected = ts.iloc[4]\n assert result == expected\n\n result = ts.copy()\n result["1990-01-01 04:00:00"] = 0\n result["1990-01-01 04:00:00"] = ts.iloc[4]\n tm.assert_series_equal(result, ts)\n\n result = ts["1990-01-01 04:00:00":"1990-01-01 07:00:00"]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n result = ts.copy()\n result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = 0\n result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = ts[4:8]\n tm.assert_series_equal(result, ts)\n\n lb = "1990-01-01 04:00:00"\n rb = "1990-01-01 07:00:00"\n # GH#18435 strings get a pass from tzawareness compat\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n lb = "1990-01-01 04:00:00-0500"\n rb = "1990-01-01 07:00:00-0500"\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n # But we do not give datetimes a pass 
on tzawareness compat\n msg = "Cannot compare tz-naive and tz-aware datetime-like objects"\n naive = datetime(1990, 1, 1, 4)\n for key in [naive, Timestamp(naive), np.datetime64(naive, "ns")]:\n with pytest.raises(KeyError, match=re.escape(repr(key))):\n # GH#36148 as of 2.0 we require tzawareness-compat\n ts[key]\n\n result = ts.copy()\n # GH#36148 as of 2.0 we do not ignore tzawareness mismatch in indexing,\n # so setting it as a new key casts to object rather than matching\n # rng[4]\n result[naive] = ts.iloc[4]\n assert result.index.dtype == object\n tm.assert_index_equal(result.index[:-1], rng.astype(object))\n assert result.index[-1] == naive\n\n msg = "Cannot compare tz-naive and tz-aware datetime-like objects"\n with pytest.raises(TypeError, match=msg):\n # GH#36148 require tzawareness compat as of 2.0\n ts[naive : datetime(1990, 1, 1, 7)]\n\n result = ts.copy()\n with pytest.raises(TypeError, match=msg):\n # GH#36148 require tzawareness compat as of 2.0\n result[naive : datetime(1990, 1, 1, 7)] = 0\n with pytest.raises(TypeError, match=msg):\n # GH#36148 require tzawareness compat as of 2.0\n result[naive : datetime(1990, 1, 1, 7)] = 99\n # the __setitems__ here failed, so result should still match ts\n tm.assert_series_equal(result, ts)\n\n lb = naive\n rb = datetime(1990, 1, 1, 7)\n msg = r"Invalid comparison between dtype=datetime64\[ns, US/Eastern\] and datetime"\n with pytest.raises(TypeError, match=msg):\n # tznaive vs tzaware comparison is invalid\n # see GH#18376, GH#18162\n ts[(ts.index >= lb) & (ts.index <= rb)]\n\n lb = Timestamp(naive).tz_localize(rng.tzinfo)\n rb = Timestamp(datetime(1990, 1, 1, 7)).tz_localize(rng.tzinfo)\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n result = ts[ts.index[4]]\n expected = ts.iloc[4]\n assert result == expected\n\n result = ts[ts.index[4:8]]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n result = ts.copy()\n 
result[ts.index[4:8]] = 0\n result.iloc[4:8] = ts.iloc[4:8]\n tm.assert_series_equal(result, ts)\n\n # also test partial date slicing\n result = ts["1990-01-02"]\n expected = ts[24:48]\n tm.assert_series_equal(result, expected)\n\n result = ts.copy()\n result["1990-01-02"] = 0\n result["1990-01-02"] = ts[24:48]\n tm.assert_series_equal(result, ts)\n\n\ndef test_getitem_setitem_periodindex():\n N = 50\n rng = period_range("1/1/1990", periods=N, freq="h")\n ts = Series(np.random.default_rng(2).standard_normal(N), index=rng)\n\n result = ts["1990-01-01 04"]\n expected = ts.iloc[4]\n assert result == expected\n\n result = ts.copy()\n result["1990-01-01 04"] = 0\n result["1990-01-01 04"] = ts.iloc[4]\n tm.assert_series_equal(result, ts)\n\n result = ts["1990-01-01 04":"1990-01-01 07"]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n result = ts.copy()\n result["1990-01-01 04":"1990-01-01 07"] = 0\n result["1990-01-01 04":"1990-01-01 07"] = ts[4:8]\n tm.assert_series_equal(result, ts)\n\n lb = "1990-01-01 04"\n rb = "1990-01-01 07"\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n # GH 2782\n result = ts[ts.index[4]]\n expected = ts.iloc[4]\n assert result == expected\n\n result = ts[ts.index[4:8]]\n expected = ts[4:8]\n tm.assert_series_equal(result, expected)\n\n result = ts.copy()\n result[ts.index[4:8]] = 0\n result.iloc[4:8] = ts.iloc[4:8]\n tm.assert_series_equal(result, ts)\n\n\ndef test_datetime_indexing():\n index = date_range("1/1/2000", "1/7/2000")\n index = index.repeat(3)\n\n s = Series(len(index), index=index)\n stamp = Timestamp("1/8/2000")\n\n with pytest.raises(KeyError, match=re.escape(repr(stamp))):\n s[stamp]\n s[stamp] = 0\n assert s[stamp] == 0\n\n # not monotonic\n s = Series(len(index), index=index)\n s = s[::-1]\n\n with pytest.raises(KeyError, match=re.escape(repr(stamp))):\n s[stamp]\n s[stamp] = 0\n assert s[stamp] == 0\n\n\n# test duplicates in time 
series\n\n\ndef test_indexing_with_duplicate_datetimeindex(\n rand_series_with_duplicate_datetimeindex,\n):\n ts = rand_series_with_duplicate_datetimeindex\n\n uniques = ts.index.unique()\n for date in uniques:\n result = ts[date]\n\n mask = ts.index == date\n total = (ts.index == date).sum()\n expected = ts[mask]\n if total > 1:\n tm.assert_series_equal(result, expected)\n else:\n tm.assert_almost_equal(result, expected.iloc[0])\n\n cp = ts.copy()\n cp[date] = 0\n expected = Series(np.where(mask, 0, ts), index=ts.index)\n tm.assert_series_equal(cp, expected)\n\n key = datetime(2000, 1, 6)\n with pytest.raises(KeyError, match=re.escape(repr(key))):\n ts[key]\n\n # new index\n ts[datetime(2000, 1, 6)] = 0\n assert ts[datetime(2000, 1, 6)] == 0\n\n\ndef test_loc_getitem_over_size_cutoff(monkeypatch):\n # #1821\n\n monkeypatch.setattr(libindex, "_SIZE_CUTOFF", 1000)\n\n # create large list of non periodic datetime\n dates = []\n sec = timedelta(seconds=1)\n half_sec = timedelta(microseconds=500000)\n d = datetime(2011, 12, 5, 20, 30)\n n = 1100\n for i in range(n):\n dates.append(d)\n dates.append(d + sec)\n dates.append(d + sec + half_sec)\n dates.append(d + sec + sec + half_sec)\n d += 3 * sec\n\n # duplicate some values in the list\n duplicate_positions = np.random.default_rng(2).integers(0, len(dates) - 1, 20)\n for p in duplicate_positions:\n dates[p + 1] = dates[p]\n\n df = DataFrame(\n np.random.default_rng(2).standard_normal((len(dates), 4)),\n index=dates,\n columns=list("ABCD"),\n )\n\n pos = n * 3\n timestamp = df.index[pos]\n assert timestamp in df.index\n\n # it works!\n df.loc[timestamp]\n assert len(df.loc[[timestamp]]) > 0\n\n\ndef test_indexing_over_size_cutoff_period_index(monkeypatch):\n # GH 27136\n\n monkeypatch.setattr(libindex, "_SIZE_CUTOFF", 1000)\n\n n = 1100\n idx = period_range("1/1/2000", freq="min", periods=n)\n assert idx._engine.over_size_threshold\n\n s = Series(np.random.default_rng(2).standard_normal(len(idx)), index=idx)\n\n pos = n 
- 1\n timestamp = idx[pos]\n assert timestamp in s.index\n\n # it works!\n s[timestamp]\n assert len(s.loc[[timestamp]]) > 0\n\n\ndef test_indexing_unordered():\n # GH 2437\n rng = date_range(start="2011-01-01", end="2011-01-15")\n ts = Series(np.random.default_rng(2).random(len(rng)), index=rng)\n ts2 = pd.concat([ts[0:4], ts[-4:], ts[4:-4]])\n\n for t in ts.index:\n expected = ts[t]\n result = ts2[t]\n assert expected == result\n\n # GH 3448 (ranges)\n def compare(slobj):\n result = ts2[slobj].copy()\n result = result.sort_index()\n expected = ts[slobj]\n expected.index = expected.index._with_freq(None)\n tm.assert_series_equal(result, expected)\n\n for key in [\n slice("2011-01-01", "2011-01-15"),\n slice("2010-12-30", "2011-01-15"),\n slice("2011-01-01", "2011-01-16"),\n # partial ranges\n slice("2011-01-01", "2011-01-6"),\n slice("2011-01-06", "2011-01-8"),\n slice("2011-01-06", "2011-01-12"),\n ]:\n with pytest.raises(\n KeyError, match="Value based partial slicing on non-monotonic"\n ):\n compare(key)\n\n # single values\n result = ts2["2011"].sort_index()\n expected = ts["2011"]\n expected.index = expected.index._with_freq(None)\n tm.assert_series_equal(result, expected)\n\n\ndef test_indexing_unordered2():\n # diff freq\n rng = date_range(datetime(2005, 1, 1), periods=20, freq="ME")\n ts = Series(np.arange(len(rng)), index=rng)\n ts = ts.take(np.random.default_rng(2).permutation(20))\n\n result = ts["2005"]\n for t in result.index:\n assert t.year == 2005\n\n\ndef test_indexing():\n idx = date_range("2001-1-1", periods=20, freq="ME")\n ts = Series(np.random.default_rng(2).random(len(idx)), index=idx)\n\n # getting\n\n # GH 3070, make sure semantics work on Series/Frame\n result = ts["2001"]\n tm.assert_series_equal(result, ts.iloc[:12])\n\n df = DataFrame({"A": ts.copy()})\n\n # GH#36179 pre-2.0 df["2001"] operated as slicing on rows. 
in 2.0 it behaves\n # like any other key, so raises\n with pytest.raises(KeyError, match="2001"):\n df["2001"]\n\n # setting\n ts = Series(np.random.default_rng(2).random(len(idx)), index=idx)\n expected = ts.copy()\n expected.iloc[:12] = 1\n ts["2001"] = 1\n tm.assert_series_equal(ts, expected)\n\n expected = df.copy()\n expected.iloc[:12, 0] = 1\n df.loc["2001", "A"] = 1\n tm.assert_frame_equal(df, expected)\n\n\ndef test_getitem_str_month_with_datetimeindex():\n # GH3546 (not including times on the last day)\n idx = date_range(start="2013-05-31 00:00", end="2013-05-31 23:00", freq="h")\n ts = Series(range(len(idx)), index=idx)\n expected = ts["2013-05"]\n tm.assert_series_equal(expected, ts)\n\n idx = date_range(start="2013-05-31 00:00", end="2013-05-31 23:59", freq="s")\n ts = Series(range(len(idx)), index=idx)\n expected = ts["2013-05"]\n tm.assert_series_equal(expected, ts)\n\n\ndef test_getitem_str_year_with_datetimeindex():\n idx = [\n Timestamp("2013-05-31 00:00"),\n Timestamp(datetime(2013, 5, 31, 23, 59, 59, 999999)),\n ]\n ts = Series(range(len(idx)), index=idx)\n expected = ts["2013"]\n tm.assert_series_equal(expected, ts)\n\n\ndef test_getitem_str_second_with_datetimeindex():\n # GH14826, indexing with a seconds resolution string / datetime object\n df = DataFrame(\n np.random.default_rng(2).random((5, 5)),\n columns=["open", "high", "low", "close", "volume"],\n index=date_range("2012-01-02 18:01:00", periods=5, tz="US/Central", freq="s"),\n )\n\n # this is a single date, so will raise\n with pytest.raises(KeyError, match=r"^'2012-01-02 18:01:02'$"):\n df["2012-01-02 18:01:02"]\n\n msg = r"Timestamp\('2012-01-02 18:01:02-0600', tz='US/Central'\)"\n with pytest.raises(KeyError, match=msg):\n df[df.index[2]]\n\n\ndef test_compare_datetime_with_all_none():\n # GH#54870\n ser = Series(["2020-01-01", "2020-01-02"], dtype="datetime64[ns]")\n ser2 = Series([None, None])\n result = ser > ser2\n expected = Series([False, False])\n 
tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\indexing\test_datetime.py | test_datetime.py | Python | 14,752 | 0.95 | 0.058116 | 0.106599 | awesome-app | 627 | 2024-10-18T13:40:51.782357 | BSD-3-Clause | true | b52c8846a19ca18bd461b3f7479be596 |
import pytest

from pandas import (
    Index,
    Series,
    date_range,
)
import pandas._testing as tm


class TestSeriesDelItem:
    """Tests for in-place, label-based ``del series[key]``."""

    def test_delitem(self):
        # GH#5542
        # deletion is label-based and happens in place
        ser = Series(range(5))
        del ser[0]
        tm.assert_series_equal(ser, Series(range(1, 5), index=range(1, 5)))

        del ser[1]
        tm.assert_series_equal(ser, Series(range(2, 5), index=range(2, 5)))

        # shrink to empty, re-add, shrink to empty again
        empty = Series(dtype="int64", index=Index([], dtype="int64"))
        ser = Series(1)
        del ser[0]
        tm.assert_series_equal(ser, empty)
        ser[0] = 1
        tm.assert_series_equal(ser, Series(1))
        del ser[0]
        tm.assert_series_equal(ser, empty)

    def test_delitem_object_index(self):
        # deletion/re-insertion round-trip with a string-label Index
        empty = Series(dtype="int64", index=Index([], dtype="str"))
        ser = Series(1, index=Index(["a"], dtype="str"))
        del ser["a"]
        tm.assert_series_equal(ser, empty)
        ser["a"] = 1
        tm.assert_series_equal(ser, Series(1, index=Index(["a"], dtype="str")))
        del ser["a"]
        tm.assert_series_equal(ser, empty)

    def test_delitem_missing_key(self):
        # deleting an absent label from an empty Series raises KeyError
        ser = Series(dtype=object)

        with pytest.raises(KeyError, match=r"^0$"):
            del ser[0]

    def test_delitem_extension_dtype(self):
        # GH#40386
        # deletion must preserve extension dtypes

        # DatetimeTZDtype
        dti = date_range("2016-01-01", periods=3, tz="US/Pacific")
        ser = Series(dti)
        expected = ser[[0, 2]]
        del ser[1]
        assert ser.dtype == dti.dtype
        tm.assert_series_equal(ser, expected)

        # PeriodDtype
        pi = dti.tz_localize(None).to_period("D")
        ser = Series(pi)
        expected = ser[:2]
        del ser[2]
        assert ser.dtype == pi.dtype
        tm.assert_series_equal(ser, expected)
import numpy as np
import pytest

import pandas as pd
from pandas import (
    DatetimeIndex,
    Index,
    Series,
    date_range,
)
import pandas._testing as tm


def test_get():
    # GH 6383
    # Series.get returns the default for a missing label and the stored value
    # for a present one — including float labels and boolean labels.
    s = Series(
        np.array(
            [
                43,
                48,
                60,
                48,
                50,
                51,
                50,
                45,
                57,
                48,
                56,
                45,
                51,
                39,
                55,
                43,
                54,
                52,
                51,
                54,
            ]
        )
    )

    result = s.get(25, 0)
    expected = 0
    assert result == expected

    s = Series(
        np.array(
            [
                43,
                48,
                60,
                48,
                50,
                51,
                50,
                45,
                57,
                48,
                56,
                45,
                51,
                39,
                55,
                43,
                54,
                52,
                51,
                54,
            ]
        ),
        index=Index(
            [
                25.0,
                36.0,
                49.0,
                64.0,
                81.0,
                100.0,
                121.0,
                144.0,
                169.0,
                196.0,
                1225.0,
                1296.0,
                1369.0,
                1444.0,
                1521.0,
                1600.0,
                1681.0,
                1764.0,
                1849.0,
                1936.0,
            ],
            dtype=np.float64,
        ),
    )

    result = s.get(25, 0)
    expected = 43
    assert result == expected

    # GH 7407
    # with a boolean accessor
    df = pd.DataFrame({"i": [0] * 3, "b": [False] * 3})
    vc = df.i.value_counts()
    result = vc.get(99, default="Missing")
    assert result == "Missing"

    vc = df.b.value_counts()
    result = vc.get(False, default="Missing")
    assert result == 3

    result = vc.get(True, default="Missing")
    assert result == "Missing"


def test_get_nan(float_numpy_dtype):
    # GH 8569
    # NaN is never found as a key; the default is returned instead.
    s = Index(range(10), dtype=float_numpy_dtype).to_series()
    assert s.get(np.nan) is None
    assert s.get(np.nan, default="Missing") == "Missing"


def test_get_nan_multiple(float_numpy_dtype):
    # GH 8569
    # ensure that fixing "test_get_nan" above hasn't broken get
    # with multiple elements
    s = Index(range(10), dtype=float_numpy_dtype).to_series()

    idx = [2, 30]
    assert s.get(idx) is None

    idx = [2, np.nan]
    assert s.get(idx) is None

    # GH 17295 - all missing keys
    idx = [20, 30]
    assert s.get(idx) is None

    idx = [np.nan, np.nan]
    assert s.get(idx) is None


def test_get_with_default():
    # GH#7725
    # get() honors the default for missing labels; an int key on an
    # object-labelled Series falls back to positions (deprecated, warns).
    d0 = ["a", "b", "c", "d"]
    d1 = np.arange(4, dtype="int64")

    for data, index in ((d0, d1), (d1, d0)):
        s = Series(data, index=index)
        for i, d in zip(index, data):
            assert s.get(i) == d
            assert s.get(i, d) == d
            assert s.get(i, "z") == d

        assert s.get("e", "z") == "z"
        assert s.get("e", "e") == "e"

        msg = "Series.__getitem__ treating keys as positions is deprecated"
        warn = None
        if index is d0:
            warn = FutureWarning
        with tm.assert_produces_warning(warn, match=msg):
            assert s.get(10, "z") == "z"
            assert s.get(10, 10) == 10


@pytest.mark.parametrize(
    "arr",
    [
        np.random.default_rng(2).standard_normal(10),
        DatetimeIndex(date_range("2020-01-01", periods=10), name="a").tz_localize(
            tz="US/Eastern"
        ),
    ],
)
def test_get_with_ea(arr):
    # GH#21260
    # get() on numpy-backed and extension-array-backed Series: scalar keys,
    # list keys, slices, and the deprecated positional-int fallback.
    ser = Series(arr, index=[2 * i for i in range(len(arr))])
    assert ser.get(4) == ser.iloc[2]

    result = ser.get([4, 6])
    expected = ser.iloc[[2, 3]]
    tm.assert_series_equal(result, expected)

    result = ser.get(slice(2))
    expected = ser.iloc[[0, 1]]
    tm.assert_series_equal(result, expected)

    assert ser.get(-1) is None
    assert ser.get(ser.index.max() + 1) is None

    ser = Series(arr[:6], index=list("abcdef"))
    assert ser.get("c") == ser.iloc[2]

    result = ser.get(slice("b", "d"))
    expected = ser.iloc[[1, 2, 3]]
    tm.assert_series_equal(result, expected)

    result = ser.get("Z")
    assert result is None

    msg = "Series.__getitem__ treating keys as positions is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        assert ser.get(4) == ser.iloc[4]
    with tm.assert_produces_warning(FutureWarning, match=msg):
        assert ser.get(-1) == ser.iloc[-1]
    with tm.assert_produces_warning(FutureWarning, match=msg):
        assert ser.get(len(ser)) is None

    # GH#21257
    ser = Series(arr)
    ser2 = ser[::2]
    assert ser2.get(1) is None


def test_getitem_get(string_series, object_series):
    # get() and __getitem__ agree for present labels; integer keys warn when
    # interpreted positionally.
    msg = "Series.__getitem__ treating keys as positions is deprecated"

    for obj in [string_series, object_series]:
        idx = obj.index[5]

        assert obj[idx] == obj.get(idx)
        assert obj[idx] == obj.iloc[5]

    with tm.assert_produces_warning(FutureWarning, match=msg):
        assert string_series.get(-1) == string_series.get(string_series.index[-1])
    assert string_series.iloc[5] == string_series.get(string_series.index[5])


def test_get_none():
    # GH#5652
    # get(None) returns None for both empty and labelled Series.
    s1 = Series(dtype=object)
    s2 = Series(dtype=object, index=list("abc"))
    for s in [s1, s2]:
        result = s.get(None)
        assert result is None
"""\nSeries.__getitem__ test classes are organized by the type of key passed.\n"""\nfrom datetime import (\n date,\n datetime,\n time,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs import (\n conversion,\n timezones,\n)\n\nfrom pandas.core.dtypes.common import is_scalar\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n DataFrame,\n DatetimeIndex,\n Index,\n Series,\n Timestamp,\n date_range,\n period_range,\n timedelta_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.indexing import IndexingError\n\nfrom pandas.tseries.offsets import BDay\n\n\nclass TestSeriesGetitemScalars:\n def test_getitem_object_index_float_string(self):\n # GH#17286\n ser = Series([1] * 4, index=Index(["a", "b", "c", 1.0]))\n assert ser["a"] == 1\n assert ser[1.0] == 1\n\n def test_getitem_float_keys_tuple_values(self):\n # see GH#13509\n\n # unique Index\n ser = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.1, 0.2], name="foo")\n result = ser[0.0]\n assert result == (1, 1)\n\n # non-unique Index\n expected = Series([(1, 1), (2, 2)], index=[0.0, 0.0], name="foo")\n ser = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.0, 0.2], name="foo")\n\n result = ser[0.0]\n tm.assert_series_equal(result, expected)\n\n def test_getitem_unrecognized_scalar(self):\n # GH#32684 a scalar key that is not recognized by lib.is_scalar\n\n # a series that might be produced via `frame.dtypes`\n ser = Series([1, 2], index=[np.dtype("O"), np.dtype("i8")])\n\n key = ser.index[1]\n\n result = ser[key]\n assert result == 2\n\n def test_getitem_negative_out_of_bounds(self):\n ser = Series(["a"] * 10, index=["a"] * 10)\n\n msg = "index -11 is out of bounds for axis 0 with size 10|index out of bounds"\n warn_msg = "Series.__getitem__ treating keys as positions is deprecated"\n with pytest.raises(IndexError, match=msg):\n with tm.assert_produces_warning(FutureWarning, match=warn_msg):\n ser[-11]\n\n def test_getitem_out_of_bounds_indexerror(self, datetime_series):\n # don't 
segfault, GH#495\n msg = r"index \d+ is out of bounds for axis 0 with size \d+"\n warn_msg = "Series.__getitem__ treating keys as positions is deprecated"\n with pytest.raises(IndexError, match=msg):\n with tm.assert_produces_warning(FutureWarning, match=warn_msg):\n datetime_series[len(datetime_series)]\n\n def test_getitem_out_of_bounds_empty_rangeindex_keyerror(self):\n # GH#917\n # With a RangeIndex, an int key gives a KeyError\n ser = Series([], dtype=object)\n with pytest.raises(KeyError, match="-1"):\n ser[-1]\n\n def test_getitem_keyerror_with_integer_index(self, any_int_numpy_dtype):\n dtype = any_int_numpy_dtype\n ser = Series(\n np.random.default_rng(2).standard_normal(6),\n index=Index([0, 0, 1, 1, 2, 2], dtype=dtype),\n )\n\n with pytest.raises(KeyError, match=r"^5$"):\n ser[5]\n\n with pytest.raises(KeyError, match=r"^'c'$"):\n ser["c"]\n\n # not monotonic\n ser = Series(\n np.random.default_rng(2).standard_normal(6), index=[2, 2, 0, 0, 1, 1]\n )\n\n with pytest.raises(KeyError, match=r"^5$"):\n ser[5]\n\n with pytest.raises(KeyError, match=r"^'c'$"):\n ser["c"]\n\n def test_getitem_int64(self, datetime_series):\n idx = np.int64(5)\n msg = "Series.__getitem__ treating keys as positions is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = datetime_series[idx]\n assert res == datetime_series.iloc[5]\n\n def test_getitem_full_range(self):\n # github.com/pandas-dev/pandas/commit/4f433773141d2eb384325714a2776bcc5b2e20f7\n ser = Series(range(5), index=list(range(5)))\n result = ser[list(range(5))]\n tm.assert_series_equal(result, ser)\n\n # ------------------------------------------------------------------\n # Series with DatetimeIndex\n\n @pytest.mark.parametrize("tzstr", ["Europe/Berlin", "dateutil/Europe/Berlin"])\n def test_getitem_pydatetime_tz(self, tzstr):\n tz = timezones.maybe_get_tz(tzstr)\n\n index = date_range(\n start="2012-12-24 16:00", end="2012-12-24 18:00", freq="h", tz=tzstr\n )\n ts = Series(index=index, 
data=index.hour)\n time_pandas = Timestamp("2012-12-24 17:00", tz=tzstr)\n\n dt = datetime(2012, 12, 24, 17, 0)\n time_datetime = conversion.localize_pydatetime(dt, tz)\n assert ts[time_pandas] == ts[time_datetime]\n\n @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])\n def test_string_index_alias_tz_aware(self, tz):\n rng = date_range("1/1/2000", periods=10, tz=tz)\n ser = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng)\n\n result = ser["1/3/2000"]\n tm.assert_almost_equal(result, ser.iloc[2])\n\n def test_getitem_time_object(self):\n rng = date_range("1/1/2000", "1/5/2000", freq="5min")\n ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng)\n\n mask = (rng.hour == 9) & (rng.minute == 30)\n result = ts[time(9, 30)]\n expected = ts[mask]\n result.index = result.index._with_freq(None)\n tm.assert_series_equal(result, expected)\n\n # ------------------------------------------------------------------\n # Series with CategoricalIndex\n\n def test_getitem_scalar_categorical_index(self):\n cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")])\n\n ser = Series([1, 2], index=cats)\n\n expected = ser.iloc[0]\n result = ser[cats[0]]\n assert result == expected\n\n def test_getitem_numeric_categorical_listlike_matches_scalar(self):\n # GH#15470\n ser = Series(["a", "b", "c"], index=pd.CategoricalIndex([2, 1, 0]))\n\n # 0 is treated as a label\n assert ser[0] == "c"\n\n # the listlike analogue should also be treated as labels\n res = ser[[0]]\n expected = ser.iloc[-1:]\n tm.assert_series_equal(res, expected)\n\n res2 = ser[[0, 1, 2]]\n tm.assert_series_equal(res2, ser.iloc[::-1])\n\n def test_getitem_integer_categorical_not_positional(self):\n # GH#14865\n ser = Series(["a", "b", "c"], index=Index([1, 2, 3], dtype="category"))\n assert ser.get(3) == "c"\n assert ser[3] == "c"\n\n def test_getitem_str_with_timedeltaindex(self):\n rng = timedelta_range("1 day 10:11:12", freq="h", periods=500)\n ser 
= Series(np.arange(len(rng)), index=rng)\n\n key = "6 days, 23:11:12"\n indexer = rng.get_loc(key)\n assert indexer == 133\n\n result = ser[key]\n assert result == ser.iloc[133]\n\n msg = r"^Timedelta\('50 days 00:00:00'\)$"\n with pytest.raises(KeyError, match=msg):\n rng.get_loc("50 days")\n with pytest.raises(KeyError, match=msg):\n ser["50 days"]\n\n def test_getitem_bool_index_positional(self):\n # GH#48653\n ser = Series({True: 1, False: 0})\n msg = "Series.__getitem__ treating keys as positions is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = ser[0]\n assert result == 1\n\n\nclass TestSeriesGetitemSlices:\n def test_getitem_partial_str_slice_with_datetimeindex(self):\n # GH#34860\n arr = date_range("1/1/2008", "1/1/2009")\n ser = arr.to_series()\n result = ser["2008"]\n\n rng = date_range(start="2008-01-01", end="2008-12-31")\n expected = Series(rng, index=rng)\n\n tm.assert_series_equal(result, expected)\n\n def test_getitem_slice_strings_with_datetimeindex(self):\n idx = DatetimeIndex(\n ["1/1/2000", "1/2/2000", "1/2/2000", "1/3/2000", "1/4/2000"]\n )\n\n ts = Series(np.random.default_rng(2).standard_normal(len(idx)), index=idx)\n\n result = ts["1/2/2000":]\n expected = ts[1:]\n tm.assert_series_equal(result, expected)\n\n result = ts["1/2/2000":"1/3/2000"]\n expected = ts[1:4]\n tm.assert_series_equal(result, expected)\n\n def test_getitem_partial_str_slice_with_timedeltaindex(self):\n rng = timedelta_range("1 day 10:11:12", freq="h", periods=500)\n ser = Series(np.arange(len(rng)), index=rng)\n\n result = ser["5 day":"6 day"]\n expected = ser.iloc[86:134]\n tm.assert_series_equal(result, expected)\n\n result = ser["5 day":]\n expected = ser.iloc[86:]\n tm.assert_series_equal(result, expected)\n\n result = ser[:"6 day"]\n expected = ser.iloc[:134]\n tm.assert_series_equal(result, expected)\n\n def test_getitem_partial_str_slice_high_reso_with_timedeltaindex(self):\n # higher reso\n rng = timedelta_range("1 day 
10:11:12", freq="us", periods=2000)\n ser = Series(np.arange(len(rng)), index=rng)\n\n result = ser["1 day 10:11:12":]\n expected = ser.iloc[0:]\n tm.assert_series_equal(result, expected)\n\n result = ser["1 day 10:11:12.001":]\n expected = ser.iloc[1000:]\n tm.assert_series_equal(result, expected)\n\n result = ser["1 days, 10:11:12.001001"]\n assert result == ser.iloc[1001]\n\n def test_getitem_slice_2d(self, datetime_series):\n # GH#30588 multi-dimensional indexing deprecated\n with pytest.raises(ValueError, match="Multi-dimensional indexing"):\n datetime_series[:, np.newaxis]\n\n def test_getitem_median_slice_bug(self):\n index = date_range("20090415", "20090519", freq="2B")\n ser = Series(np.random.default_rng(2).standard_normal(13), index=index)\n\n indexer = [slice(6, 7, None)]\n msg = "Indexing with a single-item list"\n with pytest.raises(ValueError, match=msg):\n # GH#31299\n ser[indexer]\n # but we're OK with a single-element tuple\n result = ser[(indexer[0],)]\n expected = ser[indexer[0]]\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "slc, positions",\n [\n [slice(date(2018, 1, 1), None), [0, 1, 2]],\n [slice(date(2019, 1, 2), None), [2]],\n [slice(date(2020, 1, 1), None), []],\n [slice(None, date(2020, 1, 1)), [0, 1, 2]],\n [slice(None, date(2019, 1, 1)), [0]],\n ],\n )\n def test_getitem_slice_date(self, slc, positions):\n # https://github.com/pandas-dev/pandas/issues/31501\n ser = Series(\n [0, 1, 2],\n DatetimeIndex(["2019-01-01", "2019-01-01T06:00:00", "2019-01-02"]),\n )\n result = ser[slc]\n expected = ser.take(positions)\n tm.assert_series_equal(result, expected)\n\n def test_getitem_slice_float_raises(self, datetime_series):\n msg = (\n "cannot do slice indexing on DatetimeIndex with these indexers "\n r"\[{key}\] of type float"\n )\n with pytest.raises(TypeError, match=msg.format(key=r"4\.0")):\n datetime_series[4.0:10.0]\n\n with pytest.raises(TypeError, match=msg.format(key=r"4\.5")):\n 
datetime_series[4.5:10.0]\n\n def test_getitem_slice_bug(self):\n ser = Series(range(10), index=list(range(10)))\n result = ser[-12:]\n tm.assert_series_equal(result, ser)\n\n result = ser[-7:]\n tm.assert_series_equal(result, ser[3:])\n\n result = ser[:-12]\n tm.assert_series_equal(result, ser[:0])\n\n def test_getitem_slice_integers(self):\n ser = Series(\n np.random.default_rng(2).standard_normal(8),\n index=[2, 4, 6, 8, 10, 12, 14, 16],\n )\n\n result = ser[:4]\n expected = Series(ser.values[:4], index=[2, 4, 6, 8])\n tm.assert_series_equal(result, expected)\n\n\nclass TestSeriesGetitemListLike:\n @pytest.mark.parametrize("box", [list, np.array, Index, Series])\n def test_getitem_no_matches(self, box):\n # GH#33462 we expect the same behavior for list/ndarray/Index/Series\n ser = Series(["A", "B"])\n\n key = Series(["C"])\n key = box(key)\n\n msg = r"None of \[Index\(\['C'\], dtype='object|str'\)\] are in the \[index\]"\n with pytest.raises(KeyError, match=msg):\n ser[key]\n\n def test_getitem_intlist_intindex_periodvalues(self):\n ser = Series(period_range("2000-01-01", periods=10, freq="D"))\n\n result = ser[[2, 4]]\n exp = Series(\n [pd.Period("2000-01-03", freq="D"), pd.Period("2000-01-05", freq="D")],\n index=[2, 4],\n dtype="Period[D]",\n )\n tm.assert_series_equal(result, exp)\n assert result.dtype == "Period[D]"\n\n @pytest.mark.parametrize("box", [list, np.array, Index])\n def test_getitem_intlist_intervalindex_non_int(self, box):\n # GH#33404 fall back to positional since ints are unambiguous\n dti = date_range("2000-01-03", periods=3)._with_freq(None)\n ii = pd.IntervalIndex.from_breaks(dti)\n ser = Series(range(len(ii)), index=ii)\n\n expected = ser.iloc[:1]\n key = box([0])\n msg = "Series.__getitem__ treating keys as positions is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = ser[key]\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("box", [list, np.array, Index])\n 
@pytest.mark.parametrize("dtype", [np.int64, np.float64, np.uint64])\n def test_getitem_intlist_multiindex_numeric_level(self, dtype, box):\n # GH#33404 do _not_ fall back to positional since ints are ambiguous\n idx = Index(range(4)).astype(dtype)\n dti = date_range("2000-01-03", periods=3)\n mi = pd.MultiIndex.from_product([idx, dti])\n ser = Series(range(len(mi))[::-1], index=mi)\n\n key = box([5])\n with pytest.raises(KeyError, match="5"):\n ser[key]\n\n def test_getitem_uint_array_key(self, any_unsigned_int_numpy_dtype):\n # GH #37218\n ser = Series([1, 2, 3])\n key = np.array([4], dtype=any_unsigned_int_numpy_dtype)\n\n with pytest.raises(KeyError, match="4"):\n ser[key]\n with pytest.raises(KeyError, match="4"):\n ser.loc[key]\n\n\nclass TestGetitemBooleanMask:\n def test_getitem_boolean(self, string_series):\n ser = string_series\n mask = ser > ser.median()\n\n # passing list is OK\n result = ser[list(mask)]\n expected = ser[mask]\n tm.assert_series_equal(result, expected)\n tm.assert_index_equal(result.index, ser.index[mask])\n\n def test_getitem_boolean_empty(self):\n ser = Series([], dtype=np.int64)\n ser.index.name = "index_name"\n ser = ser[ser.isna()]\n assert ser.index.name == "index_name"\n assert ser.dtype == np.int64\n\n # GH#5877\n # indexing with empty series\n ser = Series(["A", "B"], dtype=object)\n expected = Series(dtype=object, index=Index([], dtype="int64"))\n result = ser[Series([], dtype=object)]\n tm.assert_series_equal(result, expected)\n\n # invalid because of the boolean indexer\n # that's empty or not-aligned\n msg = (\n r"Unalignable boolean Series provided as indexer \(index of "\n r"the boolean Series and of the indexed object do not match"\n )\n with pytest.raises(IndexingError, match=msg):\n ser[Series([], dtype=bool)]\n\n with pytest.raises(IndexingError, match=msg):\n ser[Series([True], dtype=bool)]\n\n def test_getitem_boolean_object(self, string_series):\n # using column from DataFrame\n\n ser = string_series\n mask = ser > 
ser.median()\n omask = mask.astype(object)\n\n # getitem\n result = ser[omask]\n expected = ser[mask]\n tm.assert_series_equal(result, expected)\n\n # setitem\n s2 = ser.copy()\n cop = ser.copy()\n cop[omask] = 5\n s2[mask] = 5\n tm.assert_series_equal(cop, s2)\n\n # nans raise exception\n omask[5:10] = np.nan\n msg = "Cannot mask with non-boolean array containing NA / NaN values"\n with pytest.raises(ValueError, match=msg):\n ser[omask]\n with pytest.raises(ValueError, match=msg):\n ser[omask] = 5\n\n def test_getitem_boolean_dt64_copies(self):\n # GH#36210\n dti = date_range("2016-01-01", periods=4, tz="US/Pacific")\n key = np.array([True, True, False, False])\n\n ser = Series(dti._data)\n\n res = ser[key]\n assert res._values._ndarray.base is None\n\n # compare with numeric case for reference\n ser2 = Series(range(4))\n res2 = ser2[key]\n assert res2._values.base is None\n\n def test_getitem_boolean_corner(self, datetime_series):\n ts = datetime_series\n mask_shifted = ts.shift(1, freq=BDay()) > ts.median()\n\n msg = (\n r"Unalignable boolean Series provided as indexer \(index of "\n r"the boolean Series and of the indexed object do not match"\n )\n with pytest.raises(IndexingError, match=msg):\n ts[mask_shifted]\n\n with pytest.raises(IndexingError, match=msg):\n ts.loc[mask_shifted]\n\n def test_getitem_boolean_different_order(self, string_series):\n ordered = string_series.sort_values()\n\n sel = string_series[ordered > 0]\n exp = string_series[string_series > 0]\n tm.assert_series_equal(sel, exp)\n\n def test_getitem_boolean_contiguous_preserve_freq(self):\n rng = date_range("1/1/2000", "3/1/2000", freq="B")\n\n mask = np.zeros(len(rng), dtype=bool)\n mask[10:20] = True\n\n masked = rng[mask]\n expected = rng[10:20]\n assert expected.freq == rng.freq\n tm.assert_index_equal(masked, expected)\n\n mask[22] = True\n masked = rng[mask]\n assert masked.freq is None\n\n\nclass TestGetitemCallable:\n def test_getitem_callable(self):\n # GH#12533\n ser = Series(4, 
index=list("ABCD"))\n result = ser[lambda x: "A"]\n assert result == ser.loc["A"]\n\n result = ser[lambda x: ["A", "B"]]\n expected = ser.loc[["A", "B"]]\n tm.assert_series_equal(result, expected)\n\n result = ser[lambda x: [True, False, True, True]]\n expected = ser.iloc[[0, 2, 3]]\n tm.assert_series_equal(result, expected)\n\n\ndef test_getitem_generator(string_series):\n gen = (x > 0 for x in string_series)\n result = string_series[gen]\n result2 = string_series[iter(string_series > 0)]\n expected = string_series[string_series > 0]\n tm.assert_series_equal(result, expected)\n tm.assert_series_equal(result2, expected)\n\n\n@pytest.mark.parametrize(\n "series",\n [\n Series([0, 1]),\n Series(date_range("2012-01-01", periods=2)),\n Series(date_range("2012-01-01", periods=2, tz="CET")),\n ],\n)\ndef test_getitem_ndim_deprecated(series):\n with pytest.raises(ValueError, match="Multi-dimensional indexing"):\n series[:, None]\n\n\ndef test_getitem_multilevel_scalar_slice_not_implemented(\n multiindex_year_month_day_dataframe_random_data,\n):\n # not implementing this for now\n df = multiindex_year_month_day_dataframe_random_data\n ser = df["A"]\n\n msg = r"\(2000, slice\(3, 4, None\)\)"\n with pytest.raises(TypeError, match=msg):\n ser[2000, 3:4]\n\n\ndef test_getitem_dataframe_raises():\n rng = list(range(10))\n ser = Series(10, index=rng)\n df = DataFrame(rng, index=rng)\n msg = (\n "Indexing a Series with DataFrame is not supported, "\n "use the appropriate DataFrame column"\n )\n with pytest.raises(TypeError, match=msg):\n ser[df > 5]\n\n\ndef test_getitem_assignment_series_alignment():\n # https://github.com/pandas-dev/pandas/issues/37427\n # with getitem, when assigning with a Series, it is not first aligned\n ser = Series(range(10))\n idx = np.array([2, 4, 9])\n ser[idx] = Series([10, 11, 12])\n expected = Series([0, 1, 10, 3, 11, 5, 6, 7, 8, 12])\n tm.assert_series_equal(ser, expected)\n\n\ndef test_getitem_duplicate_index_mistyped_key_raises_keyerror():\n # 
GH#29189 float_index.get_loc(None) should raise KeyError, not TypeError\n ser = Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0])\n with pytest.raises(KeyError, match="None"):\n ser[None]\n\n with pytest.raises(KeyError, match="None"):\n ser.index.get_loc(None)\n\n with pytest.raises(KeyError, match="None"):\n ser.index._engine.get_loc(None)\n\n\ndef test_getitem_1tuple_slice_without_multiindex():\n ser = Series(range(5))\n key = (slice(3),)\n\n result = ser[key]\n expected = ser[key[0]]\n tm.assert_series_equal(result, expected)\n\n\ndef test_getitem_preserve_name(datetime_series):\n result = datetime_series[datetime_series > 0]\n assert result.name == datetime_series.name\n\n msg = "Series.__getitem__ treating keys as positions is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = datetime_series[[0, 2, 4]]\n assert result.name == datetime_series.name\n\n result = datetime_series[5:10]\n assert result.name == datetime_series.name\n\n\ndef test_getitem_with_integer_labels():\n # integer indexes, be careful\n ser = Series(\n np.random.default_rng(2).standard_normal(10), index=list(range(0, 20, 2))\n )\n inds = [0, 2, 5, 7, 8]\n arr_inds = np.array([0, 2, 5, 7, 8])\n with pytest.raises(KeyError, match="not in index"):\n ser[inds]\n\n with pytest.raises(KeyError, match="not in index"):\n ser[arr_inds]\n\n\ndef test_getitem_missing(datetime_series):\n # missing\n d = datetime_series.index[0] - BDay()\n msg = r"Timestamp\('1999-12-31 00:00:00'\)"\n with pytest.raises(KeyError, match=msg):\n datetime_series[d]\n\n\ndef test_getitem_fancy(string_series, object_series):\n msg = "Series.__getitem__ treating keys as positions is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n slice1 = string_series[[1, 2, 3]]\n slice2 = object_series[[1, 2, 3]]\n assert string_series.index[2] == slice1.index[1]\n assert object_series.index[2] == slice2.index[1]\n assert string_series.iloc[2] == slice1.iloc[1]\n assert 
object_series.iloc[2] == slice2.iloc[1]\n\n\ndef test_getitem_box_float64(datetime_series):\n msg = "Series.__getitem__ treating keys as positions is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n value = datetime_series[5]\n assert isinstance(value, np.float64)\n\n\ndef test_getitem_unordered_dup():\n obj = Series(range(5), index=["c", "a", "a", "b", "b"])\n assert is_scalar(obj["c"])\n assert obj["c"] == 0\n\n\ndef test_getitem_dups():\n ser = Series(range(5), index=["A", "A", "B", "C", "C"], dtype=np.int64)\n expected = Series([3, 4], index=["C", "C"], dtype=np.int64)\n result = ser["C"]\n tm.assert_series_equal(result, expected)\n\n\ndef test_getitem_categorical_str():\n # GH#31765\n ser = Series(range(5), index=Categorical(["a", "b", "c", "a", "b"]))\n result = ser["a"]\n expected = ser.iloc[[0, 3]]\n tm.assert_series_equal(result, expected)\n\n\ndef test_slice_can_reorder_not_uniquely_indexed():\n ser = Series(1, index=["a", "a", "b", "b", "c"])\n ser[::-1] # it works!\n\n\n@pytest.mark.parametrize("index_vals", ["aabcd", "aadcb"])\ndef test_duplicated_index_getitem_positional_indexer(index_vals):\n # GH 11747\n s = Series(range(5), index=list(index_vals))\n\n msg = "Series.__getitem__ treating keys as positions is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = s[3]\n assert result == 3\n\n\nclass TestGetitemDeprecatedIndexers:\n @pytest.mark.parametrize("key", [{1}, {1: 1}])\n def test_getitem_dict_and_set_deprecated(self, key):\n # GH#42825 enforced in 2.0\n ser = Series([1, 2, 3])\n with pytest.raises(TypeError, match="as an indexer is not supported"):\n ser[key]\n\n @pytest.mark.parametrize("key", [{1}, {1: 1}])\n def test_setitem_dict_and_set_disallowed(self, key):\n # GH#42825 enforced in 2.0\n ser = Series([1, 2, 3])\n with pytest.raises(TypeError, match="as an indexer is not supported"):\n ser[key] = 1\n | .venv\Lib\site-packages\pandas\tests\series\indexing\test_getitem.py | 
test_getitem.py | Python | 24,390 | 0.95 | 0.096862 | 0.091068 | node-utils | 386 | 2023-08-04T12:33:01.341823 | MIT | true | 90c33aea4a204d012895481c8cd059e3 |
""" test get/set & misc """\nfrom datetime import timedelta\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import IndexingError\n\nfrom pandas import (\n NA,\n DataFrame,\n Index,\n IndexSlice,\n MultiIndex,\n NaT,\n Series,\n Timedelta,\n Timestamp,\n concat,\n date_range,\n isna,\n period_range,\n timedelta_range,\n)\nimport pandas._testing as tm\n\n\ndef test_basic_indexing():\n s = Series(\n np.random.default_rng(2).standard_normal(5), index=["a", "b", "a", "a", "b"]\n )\n\n warn_msg = "Series.__[sg]etitem__ treating keys as positions is deprecated"\n msg = "index 5 is out of bounds for axis 0 with size 5"\n with pytest.raises(IndexError, match=msg):\n with tm.assert_produces_warning(FutureWarning, match=warn_msg):\n s[5]\n with pytest.raises(IndexError, match=msg):\n with tm.assert_produces_warning(FutureWarning, match=warn_msg):\n s[5] = 0\n\n with pytest.raises(KeyError, match=r"^'c'$"):\n s["c"]\n\n s = s.sort_index()\n\n with pytest.raises(IndexError, match=msg):\n with tm.assert_produces_warning(FutureWarning, match=warn_msg):\n s[5]\n msg = r"index 5 is out of bounds for axis (0|1) with size 5|^5$"\n with pytest.raises(IndexError, match=msg):\n with tm.assert_produces_warning(FutureWarning, match=warn_msg):\n s[5] = 0\n\n\ndef test_getitem_numeric_should_not_fallback_to_positional(any_numeric_dtype):\n # GH51053\n dtype = any_numeric_dtype\n idx = Index([1, 0, 1], dtype=dtype)\n ser = Series(range(3), index=idx)\n result = ser[1]\n expected = Series([0, 2], index=Index([1, 1], dtype=dtype))\n tm.assert_series_equal(result, expected, check_exact=True)\n\n\ndef test_setitem_numeric_should_not_fallback_to_positional(any_numeric_dtype):\n # GH51053\n dtype = any_numeric_dtype\n idx = Index([1, 0, 1], dtype=dtype)\n ser = Series(range(3), index=idx)\n ser[1] = 10\n expected = Series([10, 1, 10], index=idx)\n tm.assert_series_equal(ser, expected, check_exact=True)\n\n\ndef test_basic_getitem_with_labels(datetime_series):\n indices = 
datetime_series.index[[5, 10, 15]]\n\n result = datetime_series[indices]\n expected = datetime_series.reindex(indices)\n tm.assert_series_equal(result, expected)\n\n result = datetime_series[indices[0] : indices[2]]\n expected = datetime_series.loc[indices[0] : indices[2]]\n tm.assert_series_equal(result, expected)\n\n\ndef test_basic_getitem_dt64tz_values():\n # GH12089\n # with tz for values\n ser = Series(\n date_range("2011-01-01", periods=3, tz="US/Eastern"), index=["a", "b", "c"]\n )\n expected = Timestamp("2011-01-01", tz="US/Eastern")\n result = ser.loc["a"]\n assert result == expected\n result = ser.iloc[0]\n assert result == expected\n result = ser["a"]\n assert result == expected\n\n\ndef test_getitem_setitem_ellipsis(using_copy_on_write, warn_copy_on_write):\n s = Series(np.random.default_rng(2).standard_normal(10))\n\n result = s[...]\n tm.assert_series_equal(result, s)\n\n with tm.assert_cow_warning(warn_copy_on_write):\n s[...] = 5\n if not using_copy_on_write:\n assert (result == 5).all()\n\n\n@pytest.mark.parametrize(\n "result_1, duplicate_item, expected_1",\n [\n [\n Series({1: 12, 2: [1, 2, 2, 3]}),\n Series({1: 313}),\n Series({1: 12}, dtype=object),\n ],\n [\n Series({1: [1, 2, 3], 2: [1, 2, 2, 3]}),\n Series({1: [1, 2, 3]}),\n Series({1: [1, 2, 3]}),\n ],\n ],\n)\ndef test_getitem_with_duplicates_indices(result_1, duplicate_item, expected_1):\n # GH 17610\n result = result_1._append(duplicate_item)\n expected = expected_1._append(duplicate_item)\n tm.assert_series_equal(result[1], expected)\n assert result[2] == result_1[2]\n\n\ndef test_getitem_setitem_integers():\n # caused bug without test\n s = Series([1, 2, 3], ["a", "b", "c"])\n\n assert s.iloc[0] == s["a"]\n s.iloc[0] = 5\n tm.assert_almost_equal(s["a"], 5)\n\n\ndef test_series_box_timestamp():\n rng = date_range("20090415", "20090519", freq="B")\n ser = Series(rng)\n assert isinstance(ser[0], Timestamp)\n assert isinstance(ser.at[1], Timestamp)\n assert isinstance(ser.iat[2], 
Timestamp)\n assert isinstance(ser.loc[3], Timestamp)\n assert isinstance(ser.iloc[4], Timestamp)\n\n ser = Series(rng, index=rng)\n msg = "Series.__getitem__ treating keys as positions is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n assert isinstance(ser[0], Timestamp)\n assert isinstance(ser.at[rng[1]], Timestamp)\n assert isinstance(ser.iat[2], Timestamp)\n assert isinstance(ser.loc[rng[3]], Timestamp)\n assert isinstance(ser.iloc[4], Timestamp)\n\n\ndef test_series_box_timedelta():\n rng = timedelta_range("1 day 1 s", periods=5, freq="h")\n ser = Series(rng)\n assert isinstance(ser[0], Timedelta)\n assert isinstance(ser.at[1], Timedelta)\n assert isinstance(ser.iat[2], Timedelta)\n assert isinstance(ser.loc[3], Timedelta)\n assert isinstance(ser.iloc[4], Timedelta)\n\n\ndef test_getitem_ambiguous_keyerror(indexer_sl):\n ser = Series(range(10), index=list(range(0, 20, 2)))\n with pytest.raises(KeyError, match=r"^1$"):\n indexer_sl(ser)[1]\n\n\ndef test_getitem_dups_with_missing(indexer_sl):\n # breaks reindex, so need to use .loc internally\n # GH 4246\n ser = Series([1, 2, 3, 4], ["foo", "bar", "foo", "bah"])\n with pytest.raises(KeyError, match=re.escape("['bam'] not in index")):\n indexer_sl(ser)[["foo", "bar", "bah", "bam"]]\n\n\ndef test_setitem_ambiguous_keyerror(indexer_sl):\n s = Series(range(10), index=list(range(0, 20, 2)))\n\n # equivalent of an append\n s2 = s.copy()\n indexer_sl(s2)[1] = 5\n expected = concat([s, Series([5], index=[1])])\n tm.assert_series_equal(s2, expected)\n\n\ndef test_setitem(datetime_series):\n datetime_series[datetime_series.index[5]] = np.nan\n datetime_series.iloc[[1, 2, 17]] = np.nan\n datetime_series.iloc[6] = np.nan\n assert np.isnan(datetime_series.iloc[6])\n assert np.isnan(datetime_series.iloc[2])\n datetime_series[np.isnan(datetime_series)] = 5\n assert not np.isnan(datetime_series.iloc[2])\n\n\ndef test_setslice(datetime_series):\n sl = datetime_series[5:20]\n assert len(sl) == 
len(sl.index)\n assert sl.index.is_unique is True\n\n\ndef test_basic_getitem_setitem_corner(datetime_series):\n # invalid tuples, e.g. td.ts[:, None] vs. td.ts[:, 2]\n msg = "key of type tuple not found and not a MultiIndex"\n with pytest.raises(KeyError, match=msg):\n datetime_series[:, 2]\n with pytest.raises(KeyError, match=msg):\n datetime_series[:, 2] = 2\n\n # weird lists. [slice(0, 5)] raises but not two slices\n msg = "Indexing with a single-item list"\n with pytest.raises(ValueError, match=msg):\n # GH#31299\n datetime_series[[slice(None, 5)]]\n\n # but we're OK with a single-element tuple\n result = datetime_series[(slice(None, 5),)]\n expected = datetime_series[:5]\n tm.assert_series_equal(result, expected)\n\n # OK\n msg = r"unhashable type(: 'slice')?"\n with pytest.raises(TypeError, match=msg):\n datetime_series[[5, [None, None]]]\n with pytest.raises(TypeError, match=msg):\n datetime_series[[5, [None, None]]] = 2\n\n\ndef test_slice(string_series, object_series, using_copy_on_write, warn_copy_on_write):\n original = string_series.copy()\n numSlice = string_series[10:20]\n numSliceEnd = string_series[-10:]\n objSlice = object_series[10:20]\n\n assert string_series.index[9] not in numSlice.index\n assert object_series.index[9] not in objSlice.index\n\n assert len(numSlice) == len(numSlice.index)\n assert string_series[numSlice.index[0]] == numSlice[numSlice.index[0]]\n\n assert numSlice.index[1] == string_series.index[11]\n tm.assert_numpy_array_equal(np.array(numSliceEnd), np.array(string_series)[-10:])\n\n # Test return view.\n sl = string_series[10:20]\n with tm.assert_cow_warning(warn_copy_on_write):\n sl[:] = 0\n\n if using_copy_on_write:\n # Doesn't modify parent (CoW)\n tm.assert_series_equal(string_series, original)\n else:\n assert (string_series[10:20] == 0).all()\n\n\ndef test_timedelta_assignment():\n # GH 8209\n s = Series([], dtype=object)\n s.loc["B"] = timedelta(1)\n expected = Series(\n Timedelta("1 days"), dtype="timedelta64[ns]", 
index=Index(["B"], dtype=object)\n )\n tm.assert_series_equal(s, expected)\n\n s = s.reindex(s.index.insert(0, "A"))\n expected = Series(\n [np.nan, Timedelta("1 days")],\n dtype="timedelta64[ns]",\n index=Index(["A", "B"], dtype=object),\n )\n tm.assert_series_equal(s, expected)\n\n s.loc["A"] = timedelta(1)\n expected = Series(\n Timedelta("1 days"),\n dtype="timedelta64[ns]",\n index=Index(["A", "B"], dtype=object),\n )\n tm.assert_series_equal(s, expected)\n\n\ndef test_underlying_data_conversion(using_copy_on_write):\n # GH 4080\n df = DataFrame({c: [1, 2, 3] for c in ["a", "b", "c"]})\n return_value = df.set_index(["a", "b", "c"], inplace=True)\n assert return_value is None\n s = Series([1], index=[(2, 2, 2)])\n df["val"] = 0\n df_original = df.copy()\n df\n\n if using_copy_on_write:\n with tm.raises_chained_assignment_error():\n df["val"].update(s)\n expected = df_original\n else:\n with tm.assert_produces_warning(FutureWarning, match="inplace method"):\n df["val"].update(s)\n expected = DataFrame(\n {"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3], "val": [0, 1, 0]}\n )\n return_value = expected.set_index(["a", "b", "c"], inplace=True)\n assert return_value is None\n tm.assert_frame_equal(df, expected)\n\n\ndef test_preserve_refs(datetime_series):\n seq = datetime_series.iloc[[5, 10, 15]]\n seq.iloc[1] = np.nan\n assert not np.isnan(datetime_series.iloc[10])\n\n\ndef test_multilevel_preserve_name(lexsorted_two_level_string_multiindex, indexer_sl):\n index = lexsorted_two_level_string_multiindex\n ser = Series(\n np.random.default_rng(2).standard_normal(len(index)), index=index, name="sth"\n )\n\n result = indexer_sl(ser)["foo"]\n assert result.name == ser.name\n\n\n# miscellaneous methods\n\n\n@pytest.mark.parametrize(\n "index",\n [\n date_range("2014-01-01", periods=20, freq="MS"),\n period_range("2014-01", periods=20, freq="M"),\n timedelta_range("0", periods=20, freq="h"),\n ],\n)\ndef test_slice_with_negative_step(index):\n keystr1 = str(index[9])\n 
keystr2 = str(index[13])\n\n ser = Series(np.arange(20), index)\n SLC = IndexSlice\n\n for key in [keystr1, index[9]]:\n tm.assert_indexing_slices_equivalent(ser, SLC[key::-1], SLC[9::-1])\n tm.assert_indexing_slices_equivalent(ser, SLC[:key:-1], SLC[:8:-1])\n\n for key2 in [keystr2, index[13]]:\n tm.assert_indexing_slices_equivalent(ser, SLC[key2:key:-1], SLC[13:8:-1])\n tm.assert_indexing_slices_equivalent(ser, SLC[key:key2:-1], SLC[0:0:-1])\n\n\ndef test_tuple_index():\n # GH 35534 - Selecting values when a Series has an Index of tuples\n s = Series([1, 2], index=[("a",), ("b",)])\n assert s[("a",)] == 1\n assert s[("b",)] == 2\n s[("b",)] = 3\n assert s[("b",)] == 3\n\n\ndef test_frozenset_index():\n # GH35747 - Selecting values when a Series has an Index of frozenset\n idx0, idx1 = frozenset("a"), frozenset("b")\n s = Series([1, 2], index=[idx0, idx1])\n assert s[idx0] == 1\n assert s[idx1] == 2\n s[idx1] = 3\n assert s[idx1] == 3\n\n\ndef test_loc_setitem_all_false_indexer():\n # GH#45778\n ser = Series([1, 2], index=["a", "b"])\n expected = ser.copy()\n rhs = Series([6, 7], index=["a", "b"])\n ser.loc[ser > 100] = rhs\n tm.assert_series_equal(ser, expected)\n\n\ndef test_loc_boolean_indexer_non_matching_index():\n # GH#46551\n ser = Series([1])\n result = ser.loc[Series([NA, False], dtype="boolean")]\n expected = Series([], dtype="int64")\n tm.assert_series_equal(result, expected)\n\n\ndef test_loc_boolean_indexer_miss_matching_index():\n # GH#46551\n ser = Series([1])\n indexer = Series([NA, False], dtype="boolean", index=[1, 2])\n with pytest.raises(IndexingError, match="Unalignable"):\n ser.loc[indexer]\n\n\ndef test_loc_setitem_nested_data_enlargement():\n # GH#48614\n df = DataFrame({"a": [1]})\n ser = Series({"label": df})\n ser.loc["new_label"] = df\n expected = Series({"label": df, "new_label": df})\n tm.assert_series_equal(ser, expected)\n\n\ndef test_loc_ea_numeric_index_oob_slice_end():\n # GH#50161\n ser = Series(1, index=Index([0, 1, 2], 
dtype="Int64"))\n result = ser.loc[2:3]\n expected = Series(1, index=Index([2], dtype="Int64"))\n tm.assert_series_equal(result, expected)\n\n\ndef test_getitem_bool_int_key():\n # GH#48653\n ser = Series({True: 1, False: 0})\n with pytest.raises(KeyError, match="0"):\n ser.loc[0]\n\n\n@pytest.mark.parametrize("val", [{}, {"b": "x"}])\n@pytest.mark.parametrize("indexer", [[], [False, False], slice(0, -1), np.array([])])\ndef test_setitem_empty_indexer(indexer, val):\n # GH#45981\n df = DataFrame({"a": [1, 2], **val})\n expected = df.copy()\n df.loc[indexer] = 1.5\n tm.assert_frame_equal(df, expected)\n\n\nclass TestDeprecatedIndexers:\n @pytest.mark.parametrize("key", [{1}, {1: 1}])\n def test_getitem_dict_and_set_deprecated(self, key):\n # GH#42825 enforced in 2.0\n ser = Series([1, 2])\n with pytest.raises(TypeError, match="as an indexer is not supported"):\n ser.loc[key]\n\n @pytest.mark.parametrize("key", [{1}, {1: 1}, ({1}, 2), ({1: 1}, 2)])\n def test_getitem_dict_and_set_deprecated_multiindex(self, key):\n # GH#42825 enforced in 2.0\n ser = Series([1, 2], index=MultiIndex.from_tuples([(1, 2), (3, 4)]))\n with pytest.raises(TypeError, match="as an indexer is not supported"):\n ser.loc[key]\n\n @pytest.mark.parametrize("key", [{1}, {1: 1}])\n def test_setitem_dict_and_set_disallowed(self, key):\n # GH#42825 enforced in 2.0\n ser = Series([1, 2])\n with pytest.raises(TypeError, match="as an indexer is not supported"):\n ser.loc[key] = 1\n\n @pytest.mark.parametrize("key", [{1}, {1: 1}, ({1}, 2), ({1: 1}, 2)])\n def test_setitem_dict_and_set_disallowed_multiindex(self, key):\n # GH#42825 enforced in 2.0\n ser = Series([1, 2], index=MultiIndex.from_tuples([(1, 2), (3, 4)]))\n with pytest.raises(TypeError, match="as an indexer is not supported"):\n ser.loc[key] = 1\n\n\nclass TestSetitemValidation:\n # This is adapted from pandas/tests/arrays/masked/test_indexing.py\n # but checks for warnings instead of errors.\n def _check_setitem_invalid(self, ser, invalid, 
indexer, warn):\n msg = "Setting an item of incompatible dtype is deprecated"\n msg = re.escape(msg)\n\n orig_ser = ser.copy()\n\n with tm.assert_produces_warning(warn, match=msg):\n ser[indexer] = invalid\n ser = orig_ser.copy()\n\n with tm.assert_produces_warning(warn, match=msg):\n ser.iloc[indexer] = invalid\n ser = orig_ser.copy()\n\n with tm.assert_produces_warning(warn, match=msg):\n ser.loc[indexer] = invalid\n ser = orig_ser.copy()\n\n with tm.assert_produces_warning(warn, match=msg):\n ser[:] = invalid\n\n _invalid_scalars = [\n 1 + 2j,\n "True",\n "1",\n "1.0",\n NaT,\n np.datetime64("NaT"),\n np.timedelta64("NaT"),\n ]\n _indexers = [0, [0], slice(0, 1), [True, False, False], slice(None, None, None)]\n\n @pytest.mark.parametrize(\n "invalid", _invalid_scalars + [1, 1.0, np.int64(1), np.float64(1)]\n )\n @pytest.mark.parametrize("indexer", _indexers)\n def test_setitem_validation_scalar_bool(self, invalid, indexer):\n ser = Series([True, False, False], dtype="bool")\n self._check_setitem_invalid(ser, invalid, indexer, FutureWarning)\n\n @pytest.mark.parametrize("invalid", _invalid_scalars + [True, 1.5, np.float64(1.5)])\n @pytest.mark.parametrize("indexer", _indexers)\n def test_setitem_validation_scalar_int(self, invalid, any_int_numpy_dtype, indexer):\n ser = Series([1, 2, 3], dtype=any_int_numpy_dtype)\n if isna(invalid) and invalid is not NaT and not np.isnat(invalid):\n warn = None\n else:\n warn = FutureWarning\n self._check_setitem_invalid(ser, invalid, indexer, warn)\n\n @pytest.mark.parametrize("invalid", _invalid_scalars + [True])\n @pytest.mark.parametrize("indexer", _indexers)\n def test_setitem_validation_scalar_float(self, invalid, float_numpy_dtype, indexer):\n ser = Series([1, 2, None], dtype=float_numpy_dtype)\n self._check_setitem_invalid(ser, invalid, indexer, FutureWarning)\n | .venv\Lib\site-packages\pandas\tests\series\indexing\test_indexing.py | test_indexing.py | Python | 16,941 | 0.95 | 0.098113 | 0.08076 | node-utils | 137 | 
2024-09-10T21:11:43.369928 | Apache-2.0 | true | 4daab0cdc52df306a014fd492c9dadff |
import numpy as np\nimport pytest\n\nfrom pandas import Series\nimport pandas._testing as tm\n\n\ndef test_mask():\n # compare with tested results in test_where\n s = Series(np.random.default_rng(2).standard_normal(5))\n cond = s > 0\n\n rs = s.where(~cond, np.nan)\n tm.assert_series_equal(rs, s.mask(cond))\n\n rs = s.where(~cond)\n rs2 = s.mask(cond)\n tm.assert_series_equal(rs, rs2)\n\n rs = s.where(~cond, -s)\n rs2 = s.mask(cond, -s)\n tm.assert_series_equal(rs, rs2)\n\n cond = Series([True, False, False, True, False], index=s.index)\n s2 = -(s.abs())\n rs = s2.where(~cond[:3])\n rs2 = s2.mask(cond[:3])\n tm.assert_series_equal(rs, rs2)\n\n rs = s2.where(~cond[:3], -s2)\n rs2 = s2.mask(cond[:3], -s2)\n tm.assert_series_equal(rs, rs2)\n\n msg = "Array conditional must be same shape as self"\n with pytest.raises(ValueError, match=msg):\n s.mask(1)\n with pytest.raises(ValueError, match=msg):\n s.mask(cond[:3].values, -s)\n\n\ndef test_mask_casts():\n # dtype changes\n ser = Series([1, 2, 3, 4])\n result = ser.mask(ser > 2, np.nan)\n expected = Series([1, 2, np.nan, np.nan])\n tm.assert_series_equal(result, expected)\n\n\ndef test_mask_casts2():\n # see gh-21891\n ser = Series([1, 2])\n res = ser.mask([True, False])\n\n exp = Series([np.nan, 2])\n tm.assert_series_equal(res, exp)\n\n\ndef test_mask_inplace():\n s = Series(np.random.default_rng(2).standard_normal(5))\n cond = s > 0\n\n rs = s.copy()\n rs.mask(cond, inplace=True)\n tm.assert_series_equal(rs.dropna(), s[~cond])\n tm.assert_series_equal(rs, s.mask(cond))\n\n rs = s.copy()\n rs.mask(cond, -s, inplace=True)\n tm.assert_series_equal(rs, s.mask(cond, -s))\n | .venv\Lib\site-packages\pandas\tests\series\indexing\test_mask.py | test_mask.py | Python | 1,711 | 0.95 | 0.057971 | 0.058824 | vue-tools | 819 | 2024-11-11T07:51:00.599207 | GPL-3.0 | true | 92de437d1e6f841e46b9fffe35b645e4 |
from datetime import (\n date,\n datetime,\n)\nfrom decimal import Decimal\nimport os\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat.numpy import (\n np_version_gt2,\n np_version_gte1p24,\n)\nfrom pandas.errors import IndexingError\n\nfrom pandas.core.dtypes.common import is_list_like\n\nfrom pandas import (\n NA,\n Categorical,\n DataFrame,\n DatetimeIndex,\n Index,\n Interval,\n IntervalIndex,\n MultiIndex,\n NaT,\n Period,\n Series,\n Timedelta,\n Timestamp,\n array,\n concat,\n date_range,\n interval_range,\n period_range,\n timedelta_range,\n)\nimport pandas._testing as tm\n\nfrom pandas.tseries.offsets import BDay\n\n\nclass TestSetitemDT64Values:\n def test_setitem_none_nan(self):\n series = Series(date_range("1/1/2000", periods=10))\n series[3] = None\n assert series[3] is NaT\n\n series[3:5] = None\n assert series[4] is NaT\n\n series[5] = np.nan\n assert series[5] is NaT\n\n series[5:7] = np.nan\n assert series[6] is NaT\n\n def test_setitem_multiindex_empty_slice(self):\n # https://github.com/pandas-dev/pandas/issues/35878\n idx = MultiIndex.from_tuples([("a", 1), ("b", 2)])\n result = Series([1, 2], index=idx)\n expected = result.copy()\n result.loc[[]] = 0\n tm.assert_series_equal(result, expected)\n\n def test_setitem_with_string_index(self):\n # GH#23451\n # Set object dtype to avoid upcast when setting date.today()\n ser = Series([1, 2, 3], index=["Date", "b", "other"], dtype=object)\n ser["Date"] = date.today()\n assert ser.Date == date.today()\n assert ser["Date"] == date.today()\n\n def test_setitem_tuple_with_datetimetz_values(self):\n # GH#20441\n arr = date_range("2017", periods=4, tz="US/Eastern")\n index = [(0, 1), (0, 2), (0, 3), (0, 4)]\n result = Series(arr, index=index)\n expected = result.copy()\n result[(0, 1)] = np.nan\n expected.iloc[0] = np.nan\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("tz", ["US/Eastern", "UTC", "Asia/Tokyo"])\n def test_setitem_with_tz(self, tz, indexer_sli):\n orig = 
Series(date_range("2016-01-01", freq="h", periods=3, tz=tz))\n assert orig.dtype == f"datetime64[ns, {tz}]"\n\n exp = Series(\n [\n Timestamp("2016-01-01 00:00", tz=tz),\n Timestamp("2011-01-01 00:00", tz=tz),\n Timestamp("2016-01-01 02:00", tz=tz),\n ],\n dtype=orig.dtype,\n )\n\n # scalar\n ser = orig.copy()\n indexer_sli(ser)[1] = Timestamp("2011-01-01", tz=tz)\n tm.assert_series_equal(ser, exp)\n\n # vector\n vals = Series(\n [Timestamp("2011-01-01", tz=tz), Timestamp("2012-01-01", tz=tz)],\n index=[1, 2],\n dtype=orig.dtype,\n )\n assert vals.dtype == f"datetime64[ns, {tz}]"\n\n exp = Series(\n [\n Timestamp("2016-01-01 00:00", tz=tz),\n Timestamp("2011-01-01 00:00", tz=tz),\n Timestamp("2012-01-01 00:00", tz=tz),\n ],\n dtype=orig.dtype,\n )\n\n ser = orig.copy()\n indexer_sli(ser)[[1, 2]] = vals\n tm.assert_series_equal(ser, exp)\n\n def test_setitem_with_tz_dst(self, indexer_sli):\n # GH#14146 trouble setting values near DST boundary\n tz = "US/Eastern"\n orig = Series(date_range("2016-11-06", freq="h", periods=3, tz=tz))\n assert orig.dtype == f"datetime64[ns, {tz}]"\n\n exp = Series(\n [\n Timestamp("2016-11-06 00:00-04:00", tz=tz),\n Timestamp("2011-01-01 00:00-05:00", tz=tz),\n Timestamp("2016-11-06 01:00-05:00", tz=tz),\n ],\n dtype=orig.dtype,\n )\n\n # scalar\n ser = orig.copy()\n indexer_sli(ser)[1] = Timestamp("2011-01-01", tz=tz)\n tm.assert_series_equal(ser, exp)\n\n # vector\n vals = Series(\n [Timestamp("2011-01-01", tz=tz), Timestamp("2012-01-01", tz=tz)],\n index=[1, 2],\n dtype=orig.dtype,\n )\n assert vals.dtype == f"datetime64[ns, {tz}]"\n\n exp = Series(\n [\n Timestamp("2016-11-06 00:00", tz=tz),\n Timestamp("2011-01-01 00:00", tz=tz),\n Timestamp("2012-01-01 00:00", tz=tz),\n ],\n dtype=orig.dtype,\n )\n\n ser = orig.copy()\n indexer_sli(ser)[[1, 2]] = vals\n tm.assert_series_equal(ser, exp)\n\n def test_object_series_setitem_dt64array_exact_match(self):\n # make sure the dt64 isn't cast by numpy to integers\n # 
https://github.com/numpy/numpy/issues/12550\n\n ser = Series({"X": np.nan}, dtype=object)\n\n indexer = [True]\n\n # "exact_match" -> size of array being set matches size of ser\n value = np.array([4], dtype="M8[ns]")\n\n ser.iloc[indexer] = value\n\n expected = Series([value[0]], index=["X"], dtype=object)\n assert all(isinstance(x, np.datetime64) for x in expected.values)\n\n tm.assert_series_equal(ser, expected)\n\n\nclass TestSetitemScalarIndexer:\n def test_setitem_negative_out_of_bounds(self):\n ser = Series(["a"] * 10, index=["a"] * 10)\n\n # string index falls back to positional\n msg = "index -11|-1 is out of bounds for axis 0 with size 10"\n warn_msg = "Series.__setitem__ treating keys as positions is deprecated"\n with pytest.raises(IndexError, match=msg):\n with tm.assert_produces_warning(FutureWarning, match=warn_msg):\n ser[-11] = "foo"\n\n @pytest.mark.parametrize("indexer", [tm.loc, tm.at])\n @pytest.mark.parametrize("ser_index", [0, 1])\n def test_setitem_series_object_dtype(self, indexer, ser_index):\n # GH#38303\n ser = Series([0, 0], dtype="object")\n idxr = indexer(ser)\n idxr[0] = Series([42], index=[ser_index])\n expected = Series([Series([42], index=[ser_index]), 0], dtype="object")\n tm.assert_series_equal(ser, expected)\n\n @pytest.mark.parametrize("index, exp_value", [(0, 42), (1, np.nan)])\n def test_setitem_series(self, index, exp_value):\n # GH#38303\n ser = Series([0, 0])\n ser.loc[0] = Series([42], index=[index])\n expected = Series([exp_value, 0])\n tm.assert_series_equal(ser, expected)\n\n\nclass TestSetitemSlices:\n def test_setitem_slice_float_raises(self, datetime_series):\n msg = (\n "cannot do slice indexing on DatetimeIndex with these indexers "\n r"\[{key}\] of type float"\n )\n with pytest.raises(TypeError, match=msg.format(key=r"4\.0")):\n datetime_series[4.0:10.0] = 0\n\n with pytest.raises(TypeError, match=msg.format(key=r"4\.5")):\n datetime_series[4.5:10.0] = 0\n\n def test_setitem_slice(self):\n ser = 
Series(range(10), index=list(range(10)))\n ser[-12:] = 0\n assert (ser == 0).all()\n\n ser[:-12] = 5\n assert (ser == 0).all()\n\n def test_setitem_slice_integers(self):\n ser = Series(\n np.random.default_rng(2).standard_normal(8),\n index=[2, 4, 6, 8, 10, 12, 14, 16],\n )\n\n ser[:4] = 0\n assert (ser[:4] == 0).all()\n assert not (ser[4:] == 0).any()\n\n def test_setitem_slicestep(self):\n # caught this bug when writing tests\n series = Series(\n np.arange(20, dtype=np.float64), index=np.arange(20, dtype=np.int64)\n )\n\n series[::2] = 0\n assert (series[::2] == 0).all()\n\n def test_setitem_multiindex_slice(self, indexer_sli):\n # GH 8856\n mi = MultiIndex.from_product(([0, 1], list("abcde")))\n result = Series(np.arange(10, dtype=np.int64), mi)\n indexer_sli(result)[::4] = 100\n expected = Series([100, 1, 2, 3, 100, 5, 6, 7, 100, 9], mi)\n tm.assert_series_equal(result, expected)\n\n\nclass TestSetitemBooleanMask:\n def test_setitem_mask_cast(self):\n # GH#2746\n # need to upcast\n ser = Series([1, 2], index=[1, 2], dtype="int64")\n ser[[True, False]] = Series([0], index=[1], dtype="int64")\n expected = Series([0, 2], index=[1, 2], dtype="int64")\n\n tm.assert_series_equal(ser, expected)\n\n def test_setitem_mask_align_and_promote(self):\n # GH#8387: test that changing types does not break alignment\n ts = Series(\n np.random.default_rng(2).standard_normal(100), index=np.arange(100, 0, -1)\n ).round(5)\n mask = ts > 0\n left = ts.copy()\n right = ts[mask].copy().map(str)\n with tm.assert_produces_warning(\n FutureWarning, match="item of incompatible dtype"\n ):\n left[mask] = right\n expected = ts.map(lambda t: str(t) if t > 0 else t)\n tm.assert_series_equal(left, expected)\n\n def test_setitem_mask_promote_strs(self):\n ser = Series([0, 1, 2, 0])\n mask = ser > 0\n ser2 = ser[mask].map(str)\n with tm.assert_produces_warning(\n FutureWarning, match="item of incompatible dtype"\n ):\n ser[mask] = ser2\n\n expected = Series([0, "1", "2", 0])\n 
tm.assert_series_equal(ser, expected)\n\n def test_setitem_mask_promote(self):\n ser = Series([0, "foo", "bar", 0])\n mask = Series([False, True, True, False])\n ser2 = ser[mask]\n ser[mask] = ser2\n\n expected = Series([0, "foo", "bar", 0])\n tm.assert_series_equal(ser, expected)\n\n def test_setitem_boolean(self, string_series):\n mask = string_series > string_series.median()\n\n # similar indexed series\n result = string_series.copy()\n result[mask] = string_series * 2\n expected = string_series * 2\n tm.assert_series_equal(result[mask], expected[mask])\n\n # needs alignment\n result = string_series.copy()\n result[mask] = (string_series * 2)[0:5]\n expected = (string_series * 2)[0:5].reindex_like(string_series)\n expected[-mask] = string_series[mask]\n tm.assert_series_equal(result[mask], expected[mask])\n\n def test_setitem_boolean_corner(self, datetime_series):\n ts = datetime_series\n mask_shifted = ts.shift(1, freq=BDay()) > ts.median()\n\n msg = (\n r"Unalignable boolean Series provided as indexer \(index of "\n r"the boolean Series and of the indexed object do not match"\n )\n with pytest.raises(IndexingError, match=msg):\n ts[mask_shifted] = 1\n\n with pytest.raises(IndexingError, match=msg):\n ts.loc[mask_shifted] = 1\n\n def test_setitem_boolean_different_order(self, string_series):\n ordered = string_series.sort_values()\n\n copy = string_series.copy()\n copy[ordered > 0] = 0\n\n expected = string_series.copy()\n expected[expected > 0] = 0\n\n tm.assert_series_equal(copy, expected)\n\n @pytest.mark.parametrize("func", [list, np.array, Series])\n def test_setitem_boolean_python_list(self, func):\n # GH19406\n ser = Series([None, "b", None])\n mask = func([True, False, True])\n ser[mask] = ["a", "c"]\n expected = Series(["a", "b", "c"])\n tm.assert_series_equal(ser, expected)\n\n def test_setitem_boolean_nullable_int_types(self, any_numeric_ea_dtype):\n # GH: 26468\n ser = Series([5, 6, 7, 8], dtype=any_numeric_ea_dtype)\n ser[ser > 6] = 
Series(range(4), dtype=any_numeric_ea_dtype)\n expected = Series([5, 6, 2, 3], dtype=any_numeric_ea_dtype)\n tm.assert_series_equal(ser, expected)\n\n ser = Series([5, 6, 7, 8], dtype=any_numeric_ea_dtype)\n ser.loc[ser > 6] = Series(range(4), dtype=any_numeric_ea_dtype)\n tm.assert_series_equal(ser, expected)\n\n ser = Series([5, 6, 7, 8], dtype=any_numeric_ea_dtype)\n loc_ser = Series(range(4), dtype=any_numeric_ea_dtype)\n ser.loc[ser > 6] = loc_ser.loc[loc_ser > 1]\n tm.assert_series_equal(ser, expected)\n\n def test_setitem_with_bool_mask_and_values_matching_n_trues_in_length(self):\n # GH#30567\n ser = Series([None] * 10)\n mask = [False] * 3 + [True] * 5 + [False] * 2\n ser[mask] = range(5)\n result = ser\n expected = Series([None] * 3 + list(range(5)) + [None] * 2, dtype=object)\n tm.assert_series_equal(result, expected)\n\n def test_setitem_nan_with_bool(self):\n # GH 13034\n result = Series([True, False, True])\n with tm.assert_produces_warning(\n FutureWarning, match="item of incompatible dtype"\n ):\n result[0] = np.nan\n expected = Series([np.nan, False, True], dtype=object)\n tm.assert_series_equal(result, expected)\n\n def test_setitem_mask_smallint_upcast(self):\n orig = Series([1, 2, 3], dtype="int8")\n alt = np.array([999, 1000, 1001], dtype=np.int64)\n\n mask = np.array([True, False, True])\n\n ser = orig.copy()\n with tm.assert_produces_warning(\n FutureWarning, match="item of incompatible dtype"\n ):\n ser[mask] = Series(alt)\n expected = Series([999, 2, 1001])\n tm.assert_series_equal(ser, expected)\n\n ser2 = orig.copy()\n with tm.assert_produces_warning(\n FutureWarning, match="item of incompatible dtype"\n ):\n ser2.mask(mask, alt, inplace=True)\n tm.assert_series_equal(ser2, expected)\n\n ser3 = orig.copy()\n res = ser3.where(~mask, Series(alt))\n tm.assert_series_equal(res, expected)\n\n def test_setitem_mask_smallint_no_upcast(self):\n # like test_setitem_mask_smallint_upcast, but while we can't hold 'alt',\n # we *can* hold alt[mask] 
without casting\n orig = Series([1, 2, 3], dtype="uint8")\n alt = Series([245, 1000, 246], dtype=np.int64)\n\n mask = np.array([True, False, True])\n\n ser = orig.copy()\n ser[mask] = alt\n expected = Series([245, 2, 246], dtype="uint8")\n tm.assert_series_equal(ser, expected)\n\n ser2 = orig.copy()\n ser2.mask(mask, alt, inplace=True)\n tm.assert_series_equal(ser2, expected)\n\n # TODO: ser.where(~mask, alt) unnecessarily upcasts to int64\n ser3 = orig.copy()\n res = ser3.where(~mask, alt)\n tm.assert_series_equal(res, expected, check_dtype=False)\n\n\nclass TestSetitemViewCopySemantics:\n def test_setitem_invalidates_datetime_index_freq(self, using_copy_on_write):\n # GH#24096 altering a datetime64tz Series inplace invalidates the\n # `freq` attribute on the underlying DatetimeIndex\n\n dti = date_range("20130101", periods=3, tz="US/Eastern")\n ts = dti[1]\n ser = Series(dti)\n assert ser._values is not dti\n if using_copy_on_write:\n assert ser._values._ndarray.base is dti._data._ndarray.base\n else:\n assert ser._values._ndarray.base is not dti._data._ndarray.base\n assert dti.freq == "D"\n ser.iloc[1] = NaT\n assert ser._values.freq is None\n\n # check that the DatetimeIndex was not altered in place\n assert ser._values is not dti\n assert ser._values._ndarray.base is not dti._data._ndarray.base\n assert dti[1] == ts\n assert dti.freq == "D"\n\n def test_dt64tz_setitem_does_not_mutate_dti(self, using_copy_on_write):\n # GH#21907, GH#24096\n dti = date_range("2016-01-01", periods=10, tz="US/Pacific")\n ts = dti[0]\n ser = Series(dti)\n assert ser._values is not dti\n if using_copy_on_write:\n assert ser._values._ndarray.base is dti._data._ndarray.base\n assert ser._mgr.arrays[0]._ndarray.base is dti._data._ndarray.base\n else:\n assert ser._values._ndarray.base is not dti._data._ndarray.base\n assert ser._mgr.arrays[0]._ndarray.base is not dti._data._ndarray.base\n\n assert ser._mgr.arrays[0] is not dti\n\n ser[::3] = NaT\n assert ser[0] is NaT\n assert dti[0] 
== ts\n\n\nclass TestSetitemCallable:\n def test_setitem_callable_key(self):\n # GH#12533\n ser = Series([1, 2, 3, 4], index=list("ABCD"))\n ser[lambda x: "A"] = -1\n\n expected = Series([-1, 2, 3, 4], index=list("ABCD"))\n tm.assert_series_equal(ser, expected)\n\n def test_setitem_callable_other(self):\n # GH#13299\n inc = lambda x: x + 1\n\n # set object dtype to avoid upcast when setting inc\n ser = Series([1, 2, -1, 4], dtype=object)\n ser[ser < 0] = inc\n\n expected = Series([1, 2, inc, 4])\n tm.assert_series_equal(ser, expected)\n\n\nclass TestSetitemWithExpansion:\n def test_setitem_empty_series(self):\n # GH#10193\n key = Timestamp("2012-01-01")\n series = Series(dtype=object)\n series[key] = 47\n expected = Series(47, [key])\n tm.assert_series_equal(series, expected)\n\n def test_setitem_empty_series_datetimeindex_preserves_freq(self):\n # GH#33573 our index should retain its freq\n dti = DatetimeIndex([], freq="D", dtype="M8[ns]")\n series = Series([], index=dti, dtype=object)\n key = Timestamp("2012-01-01")\n series[key] = 47\n expected = Series(47, DatetimeIndex([key], freq="D").as_unit("ns"))\n tm.assert_series_equal(series, expected)\n assert series.index.freq == expected.index.freq\n\n def test_setitem_empty_series_timestamp_preserves_dtype(self):\n # GH 21881\n timestamp = Timestamp(1412526600000000000)\n series = Series([timestamp], index=["timestamp"], dtype=object)\n expected = series["timestamp"]\n\n series = Series([], dtype=object)\n series["anything"] = 300.0\n series["timestamp"] = timestamp\n result = series["timestamp"]\n assert result == expected\n\n @pytest.mark.parametrize(\n "td",\n [\n Timedelta("9 days"),\n Timedelta("9 days").to_timedelta64(),\n Timedelta("9 days").to_pytimedelta(),\n ],\n )\n def test_append_timedelta_does_not_cast(self, td, using_infer_string, request):\n # GH#22717 inserting a Timedelta should _not_ cast to int64\n if using_infer_string and not isinstance(td, Timedelta):\n # TODO: GH#56010\n 
request.applymarker(pytest.mark.xfail(reason="inferred as string"))\n\n expected = Series(["x", td], index=[0, "td"], dtype=object)\n\n ser = Series(["x"])\n ser["td"] = td\n tm.assert_series_equal(ser, expected)\n assert isinstance(ser["td"], Timedelta)\n\n ser = Series(["x"])\n ser.loc["td"] = Timedelta("9 days")\n tm.assert_series_equal(ser, expected)\n assert isinstance(ser["td"], Timedelta)\n\n def test_setitem_with_expansion_type_promotion(self):\n # GH#12599\n ser = Series(dtype=object)\n ser["a"] = Timestamp("2016-01-01")\n ser["b"] = 3.0\n ser["c"] = "foo"\n expected = Series(\n [Timestamp("2016-01-01"), 3.0, "foo"],\n index=Index(["a", "b", "c"], dtype=object),\n )\n tm.assert_series_equal(ser, expected)\n\n def test_setitem_not_contained(self, string_series):\n # set item that's not contained\n ser = string_series.copy()\n assert "foobar" not in ser.index\n ser["foobar"] = 1\n\n app = Series([1], index=["foobar"], name="series")\n expected = concat([string_series, app])\n tm.assert_series_equal(ser, expected)\n\n def test_setitem_keep_precision(self, any_numeric_ea_dtype):\n # GH#32346\n ser = Series([1, 2], dtype=any_numeric_ea_dtype)\n ser[2] = 10\n expected = Series([1, 2, 10], dtype=any_numeric_ea_dtype)\n tm.assert_series_equal(ser, expected)\n\n @pytest.mark.parametrize(\n "na, target_na, dtype, target_dtype, indexer, warn",\n [\n (NA, NA, "Int64", "Int64", 1, None),\n (NA, NA, "Int64", "Int64", 2, None),\n (NA, np.nan, "int64", "float64", 1, None),\n (NA, np.nan, "int64", "float64", 2, None),\n (NaT, NaT, "int64", "object", 1, FutureWarning),\n (NaT, NaT, "int64", "object", 2, None),\n (np.nan, NA, "Int64", "Int64", 1, None),\n (np.nan, NA, "Int64", "Int64", 2, None),\n (np.nan, NA, "Float64", "Float64", 1, None),\n (np.nan, NA, "Float64", "Float64", 2, None),\n (np.nan, np.nan, "int64", "float64", 1, None),\n (np.nan, np.nan, "int64", "float64", 2, None),\n ],\n )\n def test_setitem_enlarge_with_na(\n self, na, target_na, dtype, target_dtype, 
indexer, warn\n ):\n # GH#32346\n ser = Series([1, 2], dtype=dtype)\n with tm.assert_produces_warning(warn, match="incompatible dtype"):\n ser[indexer] = na\n expected_values = [1, target_na] if indexer == 1 else [1, 2, target_na]\n expected = Series(expected_values, dtype=target_dtype)\n tm.assert_series_equal(ser, expected)\n\n def test_setitem_enlargement_object_none(self, nulls_fixture, using_infer_string):\n # GH#48665\n ser = Series(["a", "b"])\n ser[3] = nulls_fixture\n dtype = (\n "str"\n if using_infer_string and not isinstance(nulls_fixture, Decimal)\n else object\n )\n expected = Series(["a", "b", nulls_fixture], index=[0, 1, 3], dtype=dtype)\n tm.assert_series_equal(ser, expected)\n if using_infer_string:\n ser[3] is np.nan\n else:\n assert ser[3] is nulls_fixture\n\n\ndef test_setitem_scalar_into_readonly_backing_data():\n # GH#14359: test that you cannot mutate a read only buffer\n\n array = np.zeros(5)\n array.flags.writeable = False # make the array immutable\n series = Series(array, copy=False)\n\n for n in series.index:\n msg = "assignment destination is read-only"\n with pytest.raises(ValueError, match=msg):\n series[n] = 1\n\n assert array[n] == 0\n\n\ndef test_setitem_slice_into_readonly_backing_data():\n # GH#14359: test that you cannot mutate a read only buffer\n\n array = np.zeros(5)\n array.flags.writeable = False # make the array immutable\n series = Series(array, copy=False)\n\n msg = "assignment destination is read-only"\n with pytest.raises(ValueError, match=msg):\n series[1:3] = 1\n\n assert not array.any()\n\n\ndef test_setitem_categorical_assigning_ops():\n orig = Series(Categorical(["b", "b"], categories=["a", "b"]))\n ser = orig.copy()\n ser[:] = "a"\n exp = Series(Categorical(["a", "a"], categories=["a", "b"]))\n tm.assert_series_equal(ser, exp)\n\n ser = orig.copy()\n ser[1] = "a"\n exp = Series(Categorical(["b", "a"], categories=["a", "b"]))\n tm.assert_series_equal(ser, exp)\n\n ser = orig.copy()\n ser[ser.index > 0] = "a"\n 
exp = Series(Categorical(["b", "a"], categories=["a", "b"]))\n tm.assert_series_equal(ser, exp)\n\n ser = orig.copy()\n ser[[False, True]] = "a"\n exp = Series(Categorical(["b", "a"], categories=["a", "b"]))\n tm.assert_series_equal(ser, exp)\n\n ser = orig.copy()\n ser.index = ["x", "y"]\n ser["y"] = "a"\n exp = Series(Categorical(["b", "a"], categories=["a", "b"]), index=["x", "y"])\n tm.assert_series_equal(ser, exp)\n\n\ndef test_setitem_nan_into_categorical():\n # ensure that one can set something to np.nan\n ser = Series(Categorical([1, 2, 3]))\n exp = Series(Categorical([1, np.nan, 3], categories=[1, 2, 3]))\n ser[1] = np.nan\n tm.assert_series_equal(ser, exp)\n\n\nclass TestSetitemCasting:\n @pytest.mark.parametrize("unique", [True, False])\n @pytest.mark.parametrize("val", [3, 3.0, "3"], ids=type)\n def test_setitem_non_bool_into_bool(self, val, indexer_sli, unique):\n # dont cast these 3-like values to bool\n ser = Series([True, False])\n if not unique:\n ser.index = [1, 1]\n\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n indexer_sli(ser)[1] = val\n assert type(ser.iloc[1]) == type(val)\n\n expected = Series([True, val], dtype=object, index=ser.index)\n if not unique and indexer_sli is not tm.iloc:\n expected = Series([val, val], dtype=object, index=[1, 1])\n tm.assert_series_equal(ser, expected)\n\n def test_setitem_boolean_array_into_npbool(self):\n # GH#45462\n ser = Series([True, False, True])\n values = ser._values\n arr = array([True, False, None])\n\n ser[:2] = arr[:2] # no NAs -> can set inplace\n assert ser._values is values\n\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n ser[1:] = arr[1:] # has an NA -> cast to boolean dtype\n expected = Series(arr)\n tm.assert_series_equal(ser, expected)\n\n\nclass SetitemCastingEquivalents:\n """\n Check each of several methods that _should_ be equivalent to `obj[key] = val`\n\n We assume that\n - obj.index is the default 
Index(range(len(obj)))\n - the setitem does not expand the obj\n """\n\n @pytest.fixture\n def is_inplace(self, obj, expected):\n """\n Whether we expect the setting to be in-place or not.\n """\n return expected.dtype == obj.dtype\n\n def check_indexer(self, obj, key, expected, val, indexer, is_inplace):\n orig = obj\n obj = obj.copy()\n arr = obj._values\n\n indexer(obj)[key] = val\n tm.assert_series_equal(obj, expected)\n\n self._check_inplace(is_inplace, orig, arr, obj)\n\n def _check_inplace(self, is_inplace, orig, arr, obj):\n if is_inplace is None:\n # We are not (yet) checking whether setting is inplace or not\n pass\n elif is_inplace:\n if arr.dtype.kind in ["m", "M"]:\n # We may not have the same DTA/TDA, but will have the same\n # underlying data\n assert arr._ndarray is obj._values._ndarray\n else:\n assert obj._values is arr\n else:\n # otherwise original array should be unchanged\n tm.assert_equal(arr, orig._values)\n\n def test_int_key(self, obj, key, expected, warn, val, indexer_sli, is_inplace):\n if not isinstance(key, int):\n pytest.skip("Not relevant for int key")\n\n with tm.assert_produces_warning(warn, match="incompatible dtype"):\n self.check_indexer(obj, key, expected, val, indexer_sli, is_inplace)\n\n if indexer_sli is tm.loc:\n with tm.assert_produces_warning(warn, match="incompatible dtype"):\n self.check_indexer(obj, key, expected, val, tm.at, is_inplace)\n elif indexer_sli is tm.iloc:\n with tm.assert_produces_warning(warn, match="incompatible dtype"):\n self.check_indexer(obj, key, expected, val, tm.iat, is_inplace)\n\n rng = range(key, key + 1)\n with tm.assert_produces_warning(warn, match="incompatible dtype"):\n self.check_indexer(obj, rng, expected, val, indexer_sli, is_inplace)\n\n if indexer_sli is not tm.loc:\n # Note: no .loc because that handles slice edges differently\n slc = slice(key, key + 1)\n with tm.assert_produces_warning(warn, match="incompatible dtype"):\n self.check_indexer(obj, slc, expected, val, indexer_sli, 
is_inplace)\n\n ilkey = [key]\n with tm.assert_produces_warning(warn, match="incompatible dtype"):\n self.check_indexer(obj, ilkey, expected, val, indexer_sli, is_inplace)\n\n indkey = np.array(ilkey)\n with tm.assert_produces_warning(warn, match="incompatible dtype"):\n self.check_indexer(obj, indkey, expected, val, indexer_sli, is_inplace)\n\n genkey = (x for x in [key])\n with tm.assert_produces_warning(warn, match="incompatible dtype"):\n self.check_indexer(obj, genkey, expected, val, indexer_sli, is_inplace)\n\n def test_slice_key(self, obj, key, expected, warn, val, indexer_sli, is_inplace):\n if not isinstance(key, slice):\n pytest.skip("Not relevant for slice key")\n\n if indexer_sli is not tm.loc:\n # Note: no .loc because that handles slice edges differently\n with tm.assert_produces_warning(warn, match="incompatible dtype"):\n self.check_indexer(obj, key, expected, val, indexer_sli, is_inplace)\n\n ilkey = list(range(len(obj)))[key]\n with tm.assert_produces_warning(warn, match="incompatible dtype"):\n self.check_indexer(obj, ilkey, expected, val, indexer_sli, is_inplace)\n\n indkey = np.array(ilkey)\n with tm.assert_produces_warning(warn, match="incompatible dtype"):\n self.check_indexer(obj, indkey, expected, val, indexer_sli, is_inplace)\n\n genkey = (x for x in indkey)\n with tm.assert_produces_warning(warn, match="incompatible dtype"):\n self.check_indexer(obj, genkey, expected, val, indexer_sli, is_inplace)\n\n def test_mask_key(self, obj, key, expected, warn, val, indexer_sli):\n # setitem with boolean mask\n mask = np.zeros(obj.shape, dtype=bool)\n mask[key] = True\n\n obj = obj.copy()\n\n if is_list_like(val) and len(val) < mask.sum():\n msg = "boolean index did not match indexed array along dimension"\n with pytest.raises(IndexError, match=msg):\n indexer_sli(obj)[mask] = val\n return\n\n with tm.assert_produces_warning(warn, match="incompatible dtype"):\n indexer_sli(obj)[mask] = val\n tm.assert_series_equal(obj, expected)\n\n def 
test_series_where(self, obj, key, expected, warn, val, is_inplace):\n mask = np.zeros(obj.shape, dtype=bool)\n mask[key] = True\n\n if is_list_like(val) and len(val) < len(obj):\n # Series.where is not valid here\n msg = "operands could not be broadcast together with shapes"\n with pytest.raises(ValueError, match=msg):\n obj.where(~mask, val)\n return\n\n orig = obj\n obj = obj.copy()\n arr = obj._values\n\n res = obj.where(~mask, val)\n\n if val is NA and res.dtype == object:\n expected = expected.fillna(NA)\n elif val is None and res.dtype == object:\n assert expected.dtype == object\n expected = expected.copy()\n expected[expected.isna()] = None\n tm.assert_series_equal(res, expected)\n\n self._check_inplace(is_inplace, orig, arr, obj)\n\n def test_index_where(self, obj, key, expected, warn, val):\n mask = np.zeros(obj.shape, dtype=bool)\n mask[key] = True\n\n res = Index(obj, dtype=obj.dtype).where(~mask, val)\n expected_idx = Index(expected, dtype=expected.dtype)\n tm.assert_index_equal(res, expected_idx)\n\n def test_index_putmask(self, obj, key, expected, warn, val):\n mask = np.zeros(obj.shape, dtype=bool)\n mask[key] = True\n\n res = Index(obj, dtype=obj.dtype).putmask(mask, val)\n tm.assert_index_equal(res, Index(expected, dtype=expected.dtype))\n\n\n@pytest.mark.parametrize(\n "obj,expected,key,warn",\n [\n pytest.param(\n # GH#45568 setting a valid NA value into IntervalDtype[int] should\n # cast to IntervalDtype[float]\n Series(interval_range(1, 5)),\n Series(\n [Interval(1, 2), np.nan, Interval(3, 4), Interval(4, 5)],\n dtype="interval[float64]",\n ),\n 1,\n FutureWarning,\n id="interval_int_na_value",\n ),\n pytest.param(\n # these induce dtype changes\n Series([2, 3, 4, 5, 6, 7, 8, 9, 10]),\n Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan]),\n slice(None, None, 2),\n None,\n id="int_series_slice_key_step",\n ),\n pytest.param(\n Series([True, True, False, False]),\n Series([np.nan, True, np.nan, False], dtype=object),\n slice(None, None, 
2),\n FutureWarning,\n id="bool_series_slice_key_step",\n ),\n pytest.param(\n # these induce dtype changes\n Series(np.arange(10)),\n Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8, 9]),\n slice(None, 5),\n None,\n id="int_series_slice_key",\n ),\n pytest.param(\n # changes dtype GH#4463\n Series([1, 2, 3]),\n Series([np.nan, 2, 3]),\n 0,\n None,\n id="int_series_int_key",\n ),\n pytest.param(\n # changes dtype GH#4463\n Series([False]),\n Series([np.nan], dtype=object),\n # TODO: maybe go to float64 since we are changing the _whole_ Series?\n 0,\n FutureWarning,\n id="bool_series_int_key_change_all",\n ),\n pytest.param(\n # changes dtype GH#4463\n Series([False, True]),\n Series([np.nan, True], dtype=object),\n 0,\n FutureWarning,\n id="bool_series_int_key",\n ),\n ],\n)\nclass TestSetitemCastingEquivalents(SetitemCastingEquivalents):\n @pytest.fixture(params=[np.nan, np.float64("NaN"), None, NA])\n def val(self, request):\n """\n NA values that should generally be valid_na for *all* dtypes.\n\n Include both python float NaN and np.float64; only np.float64 has a\n `dtype` attribute.\n """\n return request.param\n\n\nclass TestSetitemTimedelta64IntoNumeric(SetitemCastingEquivalents):\n # timedelta64 should not be treated as integers when setting into\n # numeric Series\n\n @pytest.fixture\n def val(self):\n td = np.timedelta64(4, "ns")\n return td\n # TODO: could also try np.full((1,), td)\n\n @pytest.fixture(params=[complex, int, float])\n def dtype(self, request):\n return request.param\n\n @pytest.fixture\n def obj(self, dtype):\n arr = np.arange(5).astype(dtype)\n ser = Series(arr)\n return ser\n\n @pytest.fixture\n def expected(self, dtype):\n arr = np.arange(5).astype(dtype)\n ser = Series(arr)\n ser = ser.astype(object)\n ser.iloc[0] = np.timedelta64(4, "ns")\n return ser\n\n @pytest.fixture\n def key(self):\n return 0\n\n @pytest.fixture\n def warn(self):\n return FutureWarning\n\n\nclass TestSetitemDT64IntoInt(SetitemCastingEquivalents):\n # 
GH#39619 dont cast dt64 to int when doing this setitem\n\n @pytest.fixture(params=["M8[ns]", "m8[ns]"])\n def dtype(self, request):\n return request.param\n\n @pytest.fixture\n def scalar(self, dtype):\n val = np.datetime64("2021-01-18 13:25:00", "ns")\n if dtype == "m8[ns]":\n val = val - val\n return val\n\n @pytest.fixture\n def expected(self, scalar):\n expected = Series([scalar, scalar, 3], dtype=object)\n assert isinstance(expected[0], type(scalar))\n return expected\n\n @pytest.fixture\n def obj(self):\n return Series([1, 2, 3])\n\n @pytest.fixture\n def key(self):\n return slice(None, -1)\n\n @pytest.fixture(params=[None, list, np.array])\n def val(self, scalar, request):\n box = request.param\n if box is None:\n return scalar\n return box([scalar, scalar])\n\n @pytest.fixture\n def warn(self):\n return FutureWarning\n\n\nclass TestSetitemNAPeriodDtype(SetitemCastingEquivalents):\n # Setting compatible NA values into Series with PeriodDtype\n\n @pytest.fixture\n def expected(self, key):\n exp = Series(period_range("2000-01-01", periods=10, freq="D"))\n exp._values.view("i8")[key] = NaT._value\n assert exp[key] is NaT or all(x is NaT for x in exp[key])\n return exp\n\n @pytest.fixture\n def obj(self):\n return Series(period_range("2000-01-01", periods=10, freq="D"))\n\n @pytest.fixture(params=[3, slice(3, 5)])\n def key(self, request):\n return request.param\n\n @pytest.fixture(params=[None, np.nan])\n def val(self, request):\n return request.param\n\n @pytest.fixture\n def warn(self):\n return None\n\n\nclass TestSetitemNADatetimeLikeDtype(SetitemCastingEquivalents):\n # some nat-like values should be cast to datetime64/timedelta64 when\n # inserting into a datetime64/timedelta64 series. 
Others should coerce\n # to object and retain their dtypes.\n # GH#18586 for td64 and boolean mask case\n\n @pytest.fixture(\n params=["m8[ns]", "M8[ns]", "datetime64[ns, UTC]", "datetime64[ns, US/Central]"]\n )\n def dtype(self, request):\n return request.param\n\n @pytest.fixture\n def obj(self, dtype):\n i8vals = date_range("2016-01-01", periods=3).asi8\n idx = Index(i8vals, dtype=dtype)\n assert idx.dtype == dtype\n return Series(idx)\n\n @pytest.fixture(\n params=[\n None,\n np.nan,\n NaT,\n np.timedelta64("NaT", "ns"),\n np.datetime64("NaT", "ns"),\n ]\n )\n def val(self, request):\n return request.param\n\n @pytest.fixture\n def is_inplace(self, val, obj):\n # td64 -> cast to object iff val is datetime64("NaT")\n # dt64 -> cast to object iff val is timedelta64("NaT")\n # dt64tz -> cast to object with anything _but_ NaT\n return val is NaT or val is None or val is np.nan or obj.dtype == val.dtype\n\n @pytest.fixture\n def expected(self, obj, val, is_inplace):\n dtype = obj.dtype if is_inplace else object\n expected = Series([val] + list(obj[1:]), dtype=dtype)\n return expected\n\n @pytest.fixture\n def key(self):\n return 0\n\n @pytest.fixture\n def warn(self, is_inplace):\n return None if is_inplace else FutureWarning\n\n\nclass TestSetitemMismatchedTZCastsToObject(SetitemCastingEquivalents):\n # GH#24024\n @pytest.fixture\n def obj(self):\n return Series(date_range("2000", periods=2, tz="US/Central"))\n\n @pytest.fixture\n def val(self):\n return Timestamp("2000", tz="US/Eastern")\n\n @pytest.fixture\n def key(self):\n return 0\n\n @pytest.fixture\n def expected(self, obj, val):\n # pre-2.0 this would cast to object, in 2.0 we cast the val to\n # the target tz\n expected = Series(\n [\n val.tz_convert("US/Central"),\n Timestamp("2000-01-02 00:00:00-06:00", tz="US/Central"),\n ],\n dtype=obj.dtype,\n )\n return expected\n\n @pytest.fixture\n def warn(self):\n return None\n\n\n@pytest.mark.parametrize(\n "obj,expected,warn",\n [\n # For numeric series, we 
should coerce to NaN.\n (Series([1, 2, 3]), Series([np.nan, 2, 3]), None),\n (Series([1.0, 2.0, 3.0]), Series([np.nan, 2.0, 3.0]), None),\n # For datetime series, we should coerce to NaT.\n (\n Series([datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]),\n Series([NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)]),\n None,\n ),\n # For objects, we should preserve the None value.\n (Series(["foo", "bar", "baz"]), Series([None, "bar", "baz"]), None),\n ],\n)\nclass TestSeriesNoneCoercion(SetitemCastingEquivalents):\n @pytest.fixture\n def key(self):\n return 0\n\n @pytest.fixture\n def val(self):\n return None\n\n\nclass TestSetitemFloatIntervalWithIntIntervalValues(SetitemCastingEquivalents):\n # GH#44201 Cast to shared IntervalDtype rather than object\n\n def test_setitem_example(self):\n # Just a case here to make obvious what this test class is aimed at\n idx = IntervalIndex.from_breaks(range(4))\n obj = Series(idx)\n val = Interval(0.5, 1.5)\n\n with tm.assert_produces_warning(\n FutureWarning, match="Setting an item of incompatible dtype"\n ):\n obj[0] = val\n assert obj.dtype == "Interval[float64, right]"\n\n @pytest.fixture\n def obj(self):\n idx = IntervalIndex.from_breaks(range(4))\n return Series(idx)\n\n @pytest.fixture\n def val(self):\n return Interval(0.5, 1.5)\n\n @pytest.fixture\n def key(self):\n return 0\n\n @pytest.fixture\n def expected(self, obj, val):\n data = [val] + list(obj[1:])\n idx = IntervalIndex(data, dtype="Interval[float64]")\n return Series(idx)\n\n @pytest.fixture\n def warn(self):\n return FutureWarning\n\n\nclass TestSetitemRangeIntoIntegerSeries(SetitemCastingEquivalents):\n # GH#44261 Setting a range with sufficiently-small integers into\n # small-itemsize integer dtypes should not need to upcast\n\n @pytest.fixture\n def obj(self, any_int_numpy_dtype):\n dtype = np.dtype(any_int_numpy_dtype)\n ser = Series(range(5), dtype=dtype)\n return ser\n\n @pytest.fixture\n def val(self):\n return range(2, 4)\n\n 
@pytest.fixture\n def key(self):\n return slice(0, 2)\n\n @pytest.fixture\n def expected(self, any_int_numpy_dtype):\n dtype = np.dtype(any_int_numpy_dtype)\n exp = Series([2, 3, 2, 3, 4], dtype=dtype)\n return exp\n\n @pytest.fixture\n def warn(self):\n return None\n\n\n@pytest.mark.parametrize(\n "val, warn",\n [\n (np.array([2.0, 3.0]), None),\n (np.array([2.5, 3.5]), FutureWarning),\n (\n np.array([2**65, 2**65 + 1], dtype=np.float64),\n FutureWarning,\n ), # all ints, but can't cast\n ],\n)\nclass TestSetitemFloatNDarrayIntoIntegerSeries(SetitemCastingEquivalents):\n @pytest.fixture\n def obj(self):\n return Series(range(5), dtype=np.int64)\n\n @pytest.fixture\n def key(self):\n return slice(0, 2)\n\n @pytest.fixture\n def expected(self, val):\n if val[0] == 2:\n # NB: this condition is based on currently-hardcoded "val" cases\n dtype = np.int64\n else:\n dtype = np.float64\n res_values = np.array(range(5), dtype=dtype)\n res_values[:2] = val\n return Series(res_values)\n\n\n@pytest.mark.parametrize("val", [512, np.int16(512)])\nclass TestSetitemIntoIntegerSeriesNeedsUpcast(SetitemCastingEquivalents):\n @pytest.fixture\n def obj(self):\n return Series([1, 2, 3], dtype=np.int8)\n\n @pytest.fixture\n def key(self):\n return 1\n\n @pytest.fixture\n def expected(self):\n return Series([1, 512, 3], dtype=np.int16)\n\n @pytest.fixture\n def warn(self):\n return FutureWarning\n\n\n@pytest.mark.parametrize("val", [2**33 + 1.0, 2**33 + 1.1, 2**62])\nclass TestSmallIntegerSetitemUpcast(SetitemCastingEquivalents):\n # https://github.com/pandas-dev/pandas/issues/39584#issuecomment-941212124\n @pytest.fixture\n def obj(self):\n return Series([1, 2, 3], dtype="i4")\n\n @pytest.fixture\n def key(self):\n return 0\n\n @pytest.fixture\n def expected(self, val):\n if val % 1 != 0:\n dtype = "f8"\n else:\n dtype = "i8"\n return Series([val, 2, 3], dtype=dtype)\n\n @pytest.fixture\n def warn(self):\n return FutureWarning\n\n\nclass CoercionTest(SetitemCastingEquivalents):\n # 
Tests ported from tests.indexing.test_coercion\n\n @pytest.fixture\n def key(self):\n return 1\n\n @pytest.fixture\n def expected(self, obj, key, val, exp_dtype):\n vals = list(obj)\n vals[key] = val\n return Series(vals, dtype=exp_dtype)\n\n\n@pytest.mark.parametrize(\n "val,exp_dtype,warn",\n [(np.int32(1), np.int8, None), (np.int16(2**9), np.int16, FutureWarning)],\n)\nclass TestCoercionInt8(CoercionTest):\n # previously test_setitem_series_int8 in tests.indexing.test_coercion\n @pytest.fixture\n def obj(self):\n return Series([1, 2, 3, 4], dtype=np.int8)\n\n\n@pytest.mark.parametrize("val", [1, 1.1, 1 + 1j, True])\n@pytest.mark.parametrize("exp_dtype", [object])\nclass TestCoercionObject(CoercionTest):\n # previously test_setitem_series_object in tests.indexing.test_coercion\n @pytest.fixture\n def obj(self):\n return Series(["a", "b", "c", "d"], dtype=object)\n\n @pytest.fixture\n def warn(self):\n return None\n\n\n@pytest.mark.parametrize(\n "val,exp_dtype,warn",\n [\n (1, np.complex128, None),\n (1.1, np.complex128, None),\n (1 + 1j, np.complex128, None),\n (True, object, FutureWarning),\n ],\n)\nclass TestCoercionComplex(CoercionTest):\n # previously test_setitem_series_complex128 in tests.indexing.test_coercion\n @pytest.fixture\n def obj(self):\n return Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])\n\n\n@pytest.mark.parametrize(\n "val,exp_dtype,warn",\n [\n (1, object, FutureWarning),\n ("3", object, FutureWarning),\n (3, object, FutureWarning),\n (1.1, object, FutureWarning),\n (1 + 1j, object, FutureWarning),\n (True, bool, None),\n ],\n)\nclass TestCoercionBool(CoercionTest):\n # previously test_setitem_series_bool in tests.indexing.test_coercion\n @pytest.fixture\n def obj(self):\n return Series([True, False, True, False], dtype=bool)\n\n\n@pytest.mark.parametrize(\n "val,exp_dtype,warn",\n [\n (1, np.int64, None),\n (1.1, np.float64, FutureWarning),\n (1 + 1j, np.complex128, FutureWarning),\n (True, object, FutureWarning),\n ],\n)\nclass 
TestCoercionInt64(CoercionTest):\n # previously test_setitem_series_int64 in tests.indexing.test_coercion\n @pytest.fixture\n def obj(self):\n return Series([1, 2, 3, 4])\n\n\n@pytest.mark.parametrize(\n "val,exp_dtype,warn",\n [\n (1, np.float64, None),\n (1.1, np.float64, None),\n (1 + 1j, np.complex128, FutureWarning),\n (True, object, FutureWarning),\n ],\n)\nclass TestCoercionFloat64(CoercionTest):\n # previously test_setitem_series_float64 in tests.indexing.test_coercion\n @pytest.fixture\n def obj(self):\n return Series([1.1, 2.2, 3.3, 4.4])\n\n\n@pytest.mark.parametrize(\n "val,exp_dtype,warn",\n [\n (1, np.float32, None),\n pytest.param(\n 1.1,\n np.float32,\n None,\n marks=pytest.mark.xfail(\n (\n not np_version_gte1p24\n or (\n np_version_gte1p24\n and not np_version_gt2\n and os.environ.get("NPY_PROMOTION_STATE", "legacy") != "weak"\n )\n ),\n reason="np.float32(1.1) ends up as 1.100000023841858, so "\n "np_can_hold_element raises and we cast to float64",\n ),\n ),\n (1 + 1j, np.complex128, FutureWarning),\n (True, object, FutureWarning),\n (np.uint8(2), np.float32, None),\n (np.uint32(2), np.float32, None),\n # float32 cannot hold np.iinfo(np.uint32).max exactly\n # (closest it can hold is 4294967300.0 which off by 5.0), so\n # we cast to float64\n (np.uint32(np.iinfo(np.uint32).max), np.float64, FutureWarning),\n (np.uint64(2), np.float32, None),\n (np.int64(2), np.float32, None),\n ],\n)\nclass TestCoercionFloat32(CoercionTest):\n @pytest.fixture\n def obj(self):\n return Series([1.1, 2.2, 3.3, 4.4], dtype=np.float32)\n\n def test_slice_key(self, obj, key, expected, warn, val, indexer_sli, is_inplace):\n super().test_slice_key(obj, key, expected, warn, val, indexer_sli, is_inplace)\n\n if isinstance(val, float):\n # the xfail would xpass bc test_slice_key short-circuits\n raise AssertionError("xfail not relevant for this test.")\n\n\n@pytest.mark.parametrize(\n "val,exp_dtype,warn",\n [\n (Timestamp("2012-01-01"), "datetime64[ns]", None),\n (1, 
object, FutureWarning),\n ("x", object, FutureWarning),\n ],\n)\nclass TestCoercionDatetime64(CoercionTest):\n # previously test_setitem_series_datetime64 in tests.indexing.test_coercion\n\n @pytest.fixture\n def obj(self):\n return Series(date_range("2011-01-01", freq="D", periods=4))\n\n @pytest.fixture\n def warn(self):\n return None\n\n\n@pytest.mark.parametrize(\n "val,exp_dtype,warn",\n [\n (Timestamp("2012-01-01", tz="US/Eastern"), "datetime64[ns, US/Eastern]", None),\n # pre-2.0, a mis-matched tz would end up casting to object\n (Timestamp("2012-01-01", tz="US/Pacific"), "datetime64[ns, US/Eastern]", None),\n (Timestamp("2012-01-01"), object, FutureWarning),\n (1, object, FutureWarning),\n ],\n)\nclass TestCoercionDatetime64TZ(CoercionTest):\n # previously test_setitem_series_datetime64tz in tests.indexing.test_coercion\n @pytest.fixture\n def obj(self):\n tz = "US/Eastern"\n return Series(date_range("2011-01-01", freq="D", periods=4, tz=tz))\n\n @pytest.fixture\n def warn(self):\n return None\n\n\n@pytest.mark.parametrize(\n "val,exp_dtype,warn",\n [\n (Timedelta("12 day"), "timedelta64[ns]", None),\n (1, object, FutureWarning),\n ("x", object, FutureWarning),\n ],\n)\nclass TestCoercionTimedelta64(CoercionTest):\n # previously test_setitem_series_timedelta64 in tests.indexing.test_coercion\n @pytest.fixture\n def obj(self):\n return Series(timedelta_range("1 day", periods=4))\n\n @pytest.fixture\n def warn(self):\n return None\n\n\n@pytest.mark.parametrize(\n "val", ["foo", Period("2016", freq="Y"), Interval(1, 2, closed="both")]\n)\n@pytest.mark.parametrize("exp_dtype", [object])\nclass TestPeriodIntervalCoercion(CoercionTest):\n # GH#45768\n @pytest.fixture(\n params=[\n period_range("2016-01-01", periods=3, freq="D"),\n interval_range(1, 5),\n ]\n )\n def obj(self, request):\n return Series(request.param)\n\n @pytest.fixture\n def warn(self):\n return FutureWarning\n\n\ndef test_20643():\n # closed by GH#45121\n orig = Series([0, 1, 2], index=["a", 
"b", "c"])\n\n expected = Series([0, 2.7, 2], index=["a", "b", "c"])\n\n ser = orig.copy()\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n ser.at["b"] = 2.7\n tm.assert_series_equal(ser, expected)\n\n ser = orig.copy()\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n ser.loc["b"] = 2.7\n tm.assert_series_equal(ser, expected)\n\n ser = orig.copy()\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n ser["b"] = 2.7\n tm.assert_series_equal(ser, expected)\n\n ser = orig.copy()\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n ser.iat[1] = 2.7\n tm.assert_series_equal(ser, expected)\n\n ser = orig.copy()\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n ser.iloc[1] = 2.7\n tm.assert_series_equal(ser, expected)\n\n orig_df = orig.to_frame("A")\n expected_df = expected.to_frame("A")\n\n df = orig_df.copy()\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n df.at["b", "A"] = 2.7\n tm.assert_frame_equal(df, expected_df)\n\n df = orig_df.copy()\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n df.loc["b", "A"] = 2.7\n tm.assert_frame_equal(df, expected_df)\n\n df = orig_df.copy()\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n df.iloc[1, 0] = 2.7\n tm.assert_frame_equal(df, expected_df)\n\n df = orig_df.copy()\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n df.iat[1, 0] = 2.7\n tm.assert_frame_equal(df, expected_df)\n\n\ndef test_20643_comment():\n # https://github.com/pandas-dev/pandas/issues/20643#issuecomment-431244590\n # fixed sometime prior to GH#45121\n orig = Series([0, 1, 2], index=["a", "b", "c"])\n expected = Series([np.nan, 1, 2], index=["a", "b", "c"])\n\n ser = orig.copy()\n ser.iat[0] = None\n tm.assert_series_equal(ser, expected)\n\n ser = orig.copy()\n ser.iloc[0] = None\n 
tm.assert_series_equal(ser, expected)\n\n\ndef test_15413():\n # fixed by GH#45121\n ser = Series([1, 2, 3])\n\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n ser[ser == 2] += 0.5\n expected = Series([1, 2.5, 3])\n tm.assert_series_equal(ser, expected)\n\n ser = Series([1, 2, 3])\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n ser[1] += 0.5\n tm.assert_series_equal(ser, expected)\n\n ser = Series([1, 2, 3])\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n ser.loc[1] += 0.5\n tm.assert_series_equal(ser, expected)\n\n ser = Series([1, 2, 3])\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n ser.iloc[1] += 0.5\n tm.assert_series_equal(ser, expected)\n\n ser = Series([1, 2, 3])\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n ser.iat[1] += 0.5\n tm.assert_series_equal(ser, expected)\n\n ser = Series([1, 2, 3])\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n ser.at[1] += 0.5\n tm.assert_series_equal(ser, expected)\n\n\ndef test_32878_int_itemsize():\n # Fixed by GH#45121\n arr = np.arange(5).astype("i4")\n ser = Series(arr)\n val = np.int64(np.iinfo(np.int64).max)\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n ser[0] = val\n expected = Series([val, 1, 2, 3, 4], dtype=np.int64)\n tm.assert_series_equal(ser, expected)\n\n\ndef test_32878_complex_itemsize():\n arr = np.arange(5).astype("c8")\n ser = Series(arr)\n val = np.finfo(np.float64).max\n val = val.astype("c16")\n\n # GH#32878 used to coerce val to inf+0.000000e+00j\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n ser[0] = val\n assert ser[0] == val\n expected = Series([val, 1, 2, 3, 4], dtype="c16")\n tm.assert_series_equal(ser, expected)\n\n\ndef test_37692(indexer_al):\n # GH#37692\n ser = Series([1, 2, 3], index=["a", "b", "c"])\n with 
tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n indexer_al(ser)["b"] = "test"\n expected = Series([1, "test", 3], index=["a", "b", "c"], dtype=object)\n tm.assert_series_equal(ser, expected)\n\n\ndef test_setitem_bool_int_float_consistency(indexer_sli):\n # GH#21513\n # bool-with-int and bool-with-float both upcast to object\n # int-with-float and float-with-int are both non-casting so long\n # as the setitem can be done losslessly\n for dtype in [np.float64, np.int64]:\n ser = Series(0, index=range(3), dtype=dtype)\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n indexer_sli(ser)[0] = True\n assert ser.dtype == object\n\n ser = Series(0, index=range(3), dtype=bool)\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n ser[0] = dtype(1)\n assert ser.dtype == object\n\n # 1.0 can be held losslessly, so no casting\n ser = Series(0, index=range(3), dtype=np.int64)\n indexer_sli(ser)[0] = np.float64(1.0)\n assert ser.dtype == np.int64\n\n # 1 can be held losslessly, so no casting\n ser = Series(0, index=range(3), dtype=np.float64)\n indexer_sli(ser)[0] = np.int64(1)\n\n\ndef test_setitem_positional_with_casting():\n # GH#45070 case where in __setitem__ we get a KeyError, then when\n # we fallback we *also* get a ValueError if we try to set inplace.\n ser = Series([1, 2, 3], index=["a", "b", "c"])\n\n warn_msg = "Series.__setitem__ treating keys as positions is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=warn_msg):\n ser[0] = "X"\n expected = Series(["X", 2, 3], index=["a", "b", "c"], dtype=object)\n tm.assert_series_equal(ser, expected)\n\n\ndef test_setitem_positional_float_into_int_coerces():\n # Case where we hit a KeyError and then trying to set in-place incorrectly\n # casts a float to an int\n ser = Series([1, 2, 3], index=["a", "b", "c"])\n\n warn_msg = "Series.__setitem__ treating keys as positions is deprecated"\n with 
tm.assert_produces_warning(FutureWarning, match=warn_msg):\n ser[0] = 1.5\n expected = Series([1.5, 2, 3], index=["a", "b", "c"])\n tm.assert_series_equal(ser, expected)\n\n\ndef test_setitem_int_not_positional():\n # GH#42215 deprecated falling back to positional on __setitem__ with an\n # int not contained in the index; enforced in 2.0\n ser = Series([1, 2, 3, 4], index=[1.1, 2.1, 3.0, 4.1])\n assert not ser.index._should_fallback_to_positional\n # assert not ser.index.astype(object)._should_fallback_to_positional\n\n # 3.0 is in our index, so post-enforcement behavior is unchanged\n ser[3] = 10\n expected = Series([1, 2, 10, 4], index=ser.index)\n tm.assert_series_equal(ser, expected)\n\n # pre-enforcement `ser[5] = 5` raised IndexError\n ser[5] = 5\n expected = Series([1, 2, 10, 4, 5], index=[1.1, 2.1, 3.0, 4.1, 5.0])\n tm.assert_series_equal(ser, expected)\n\n ii = IntervalIndex.from_breaks(range(10))[::2]\n ser2 = Series(range(len(ii)), index=ii)\n exp_index = ii.astype(object).append(Index([4]))\n expected2 = Series([0, 1, 2, 3, 4, 9], index=exp_index)\n # pre-enforcement `ser2[4] = 9` interpreted 4 as positional\n ser2[4] = 9\n tm.assert_series_equal(ser2, expected2)\n\n mi = MultiIndex.from_product([ser.index, ["A", "B"]])\n ser3 = Series(range(len(mi)), index=mi)\n expected3 = ser3.copy()\n expected3.loc[4] = 99\n # pre-enforcement `ser3[4] = 99` interpreted 4 as positional\n ser3[4] = 99\n tm.assert_series_equal(ser3, expected3)\n\n\ndef test_setitem_with_bool_indexer():\n # GH#42530\n\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n result = df.pop("b").copy()\n result[[True, False, False]] = 9\n expected = Series(data=[9, 5, 6], name="b")\n tm.assert_series_equal(result, expected)\n\n df.loc[[True, False, False], "a"] = 10\n expected = DataFrame({"a": [10, 2, 3]})\n tm.assert_frame_equal(df, expected)\n\n\n@pytest.mark.parametrize("size", range(2, 6))\n@pytest.mark.parametrize(\n "mask", [[True, False, False, False, False], [True, False], 
[False]]\n)\n@pytest.mark.parametrize(\n "item", [2.0, np.nan, np.finfo(float).max, np.finfo(float).min]\n)\n# Test numpy arrays, lists and tuples as the input to be\n# broadcast\n@pytest.mark.parametrize(\n "box", [lambda x: np.array([x]), lambda x: [x], lambda x: (x,)]\n)\ndef test_setitem_bool_indexer_dont_broadcast_length1_values(size, mask, item, box):\n # GH#44265\n # see also tests.series.indexing.test_where.test_broadcast\n\n selection = np.resize(mask, size)\n\n data = np.arange(size, dtype=float)\n\n ser = Series(data)\n\n if selection.sum() != 1:\n msg = (\n "cannot set using a list-like indexer with a different "\n "length than the value"\n )\n with pytest.raises(ValueError, match=msg):\n # GH#44265\n ser[selection] = box(item)\n else:\n # In this corner case setting is equivalent to setting with the unboxed\n # item\n ser[selection] = box(item)\n\n expected = Series(np.arange(size, dtype=float))\n expected[selection] = item\n tm.assert_series_equal(ser, expected)\n\n\ndef test_setitem_empty_mask_dont_upcast_dt64():\n dti = date_range("2016-01-01", periods=3)\n ser = Series(dti)\n orig = ser.copy()\n mask = np.zeros(3, dtype=bool)\n\n ser[mask] = "foo"\n assert ser.dtype == dti.dtype # no-op -> dont upcast\n tm.assert_series_equal(ser, orig)\n\n ser.mask(mask, "foo", inplace=True)\n assert ser.dtype == dti.dtype # no-op -> dont upcast\n tm.assert_series_equal(ser, orig)\n | .venv\Lib\site-packages\pandas\tests\series\indexing\test_setitem.py | test_setitem.py | Python | 59,920 | 0.75 | 0.118919 | 0.092175 | vue-tools | 485 | 2023-08-01T19:10:40.295662 | GPL-3.0 | true | d126b1c4fc2d433f275f131302081978 |
from datetime import datetime\n\nimport numpy as np\n\nfrom pandas import (\n DatetimeIndex,\n Series,\n)\nimport pandas._testing as tm\n\n\ndef test_series_set_value():\n # GH#1561\n\n dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]\n index = DatetimeIndex(dates)\n\n s = Series(dtype=object)\n s._set_value(dates[0], 1.0)\n s._set_value(dates[1], np.nan)\n\n expected = Series([1.0, np.nan], index=index)\n\n tm.assert_series_equal(s, expected)\n\n\ndef test_set_value_dt64(datetime_series):\n idx = datetime_series.index[10]\n res = datetime_series._set_value(idx, 0)\n assert res is None\n assert datetime_series[idx] == 0\n\n\ndef test_set_value_str_index(string_series):\n # equiv\n ser = string_series.copy()\n res = ser._set_value("foobar", 0)\n assert res is None\n assert ser.index[-1] == "foobar"\n assert ser["foobar"] == 0\n\n ser2 = string_series.copy()\n ser2.loc["foobar"] = 0\n assert ser2.index[-1] == "foobar"\n assert ser2["foobar"] == 0\n | .venv\Lib\site-packages\pandas\tests\series\indexing\test_set_value.py | test_set_value.py | Python | 991 | 0.95 | 0.066667 | 0.0625 | vue-tools | 474 | 2024-09-28T06:50:06.518230 | GPL-3.0 | true | 83863974c4077b2a2d1117ecd6bfb334 |
import pytest\n\nimport pandas as pd\nfrom pandas import Series\nimport pandas._testing as tm\n\n\ndef test_take_validate_axis():\n # GH#51022\n ser = Series([-1, 5, 6, 2, 4])\n\n msg = "No axis named foo for object type Series"\n with pytest.raises(ValueError, match=msg):\n ser.take([1, 2], axis="foo")\n\n\ndef test_take():\n ser = Series([-1, 5, 6, 2, 4])\n\n actual = ser.take([1, 3, 4])\n expected = Series([5, 2, 4], index=[1, 3, 4])\n tm.assert_series_equal(actual, expected)\n\n actual = ser.take([-1, 3, 4])\n expected = Series([4, 2, 4], index=[4, 3, 4])\n tm.assert_series_equal(actual, expected)\n\n msg = "indices are out-of-bounds"\n with pytest.raises(IndexError, match=msg):\n ser.take([1, 10])\n with pytest.raises(IndexError, match=msg):\n ser.take([2, 5])\n\n\ndef test_take_categorical():\n # https://github.com/pandas-dev/pandas/issues/20664\n ser = Series(pd.Categorical(["a", "b", "c"]))\n result = ser.take([-2, -2, 0])\n expected = Series(\n pd.Categorical(["b", "b", "a"], categories=["a", "b", "c"]), index=[1, 1, 0]\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_take_slice_raises():\n ser = Series([-1, 5, 6, 2, 4])\n\n msg = "Series.take requires a sequence of integers, not slice"\n with pytest.raises(TypeError, match=msg):\n ser.take(slice(0, 3, 1))\n | .venv\Lib\site-packages\pandas\tests\series\indexing\test_take.py | test_take.py | Python | 1,353 | 0.95 | 0.1 | 0.055556 | awesome-app | 688 | 2024-09-09T06:19:54.221920 | MIT | true | 45160bb761df2135ec155d581e8a345a |
"""Tests for Series.where/Series.mask and boolean-mask ``__setitem__`` coercion."""
import numpy as np
import pytest

from pandas.core.dtypes.common import is_integer

import pandas as pd
from pandas import (
    Series,
    Timestamp,
    date_range,
    isna,
)
import pandas._testing as tm


def test_where_unsafe_int(any_signed_int_numpy_dtype):
    # Lossless integer values set through a bool mask keep the original dtype.
    s = Series(np.arange(10), dtype=any_signed_int_numpy_dtype)
    mask = s < 5

    s[mask] = range(2, 7)
    expected = Series(
        list(range(2, 7)) + list(range(5, 10)),
        dtype=any_signed_int_numpy_dtype,
    )

    tm.assert_series_equal(s, expected)


def test_where_unsafe_float(float_numpy_dtype):
    # Same as the int case: integer values into a float Series keep the dtype.
    s = Series(np.arange(10), dtype=float_numpy_dtype)
    mask = s < 5

    s[mask] = range(2, 7)
    data = list(range(2, 7)) + list(range(5, 10))
    expected = Series(data, dtype=float_numpy_dtype)

    tm.assert_series_equal(s, expected)


@pytest.mark.parametrize(
    "dtype,expected_dtype",
    [
        (np.int8, np.float64),
        (np.int16, np.float64),
        (np.int32, np.float64),
        (np.int64, np.float64),
        (np.float32, np.float32),
        (np.float64, np.float64),
    ],
)
def test_where_unsafe_upcast(dtype, expected_dtype):
    # see gh-9743
    # Non-integral floats set into an int Series force an upcast to float64,
    # which (under the deprecation enforced later) emits FutureWarning;
    # float->float assignments do not warn.
    s = Series(np.arange(10), dtype=dtype)
    values = [2.5, 3.5, 4.5, 5.5, 6.5]
    mask = s < 5
    expected = Series(values + list(range(5, 10)), dtype=expected_dtype)
    warn = (
        None
        if np.dtype(dtype).kind == np.dtype(expected_dtype).kind == "f"
        else FutureWarning
    )
    with tm.assert_produces_warning(warn, match="incompatible dtype"):
        s[mask] = values
    tm.assert_series_equal(s, expected)


def test_where_unsafe():
    # see gh-9731
    # Grab-bag of mask-setitem behaviors: upcast on lossy values, no-op cast
    # on lossless values, length validation, and None -> NaN coercion.
    s = Series(np.arange(10), dtype="int64")
    values = [2.5, 3.5, 4.5, 5.5]

    mask = s > 5
    expected = Series(list(range(6)) + values, dtype="float64")

    with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
        s[mask] = values
    tm.assert_series_equal(s, expected)

    # see gh-3235
    s = Series(np.arange(10), dtype="int64")
    mask = s < 5
    s[mask] = range(2, 7)
    expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype="int64")
    tm.assert_series_equal(s, expected)
    assert s.dtype == expected.dtype

    s = Series(np.arange(10), dtype="int64")
    mask = s > 5
    s[mask] = [0] * 4
    expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype="int64")
    tm.assert_series_equal(s, expected)

    s = Series(np.arange(10))
    mask = s > 5

    # value length must match the number of True positions in the mask
    msg = "cannot set using a list-like indexer with a different length than the value"
    with pytest.raises(ValueError, match=msg):
        s[mask] = [5, 4, 3, 2, 1]

    with pytest.raises(ValueError, match=msg):
        s[mask] = [0] * 5

    # dtype changes
    s = Series([1, 2, 3, 4])
    result = s.where(s > 2, np.nan)
    expected = Series([np.nan, np.nan, 3, 4])
    tm.assert_series_equal(result, expected)

    # GH 4667
    # setting with None changes dtype
    s = Series(range(10)).astype(float)
    s[8] = None
    result = s[8]
    assert isna(result)

    s = Series(range(10)).astype(float)
    s[s > 8] = None
    result = s[isna(s)]
    expected = Series(np.nan, index=[9])
    tm.assert_series_equal(result, expected)


def test_where():
    # Basic where semantics: NaN fill, `other=` replacement, shape
    # preservation (always returns a new object), and index alignment of
    # a shorter Series condition.
    s = Series(np.random.default_rng(2).standard_normal(5))
    cond = s > 0

    rs = s.where(cond).dropna()
    rs2 = s[cond]
    tm.assert_series_equal(rs, rs2)

    rs = s.where(cond, -s)
    tm.assert_series_equal(rs, s.abs())

    rs = s.where(cond)
    assert s.shape == rs.shape
    assert rs is not s

    # test alignment
    cond = Series([True, False, False, True, False], index=s.index)
    s2 = -(s.abs())

    expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
    rs = s2.where(cond[:3])
    tm.assert_series_equal(rs, expected)

    expected = s2.abs()
    expected.iloc[0] = s2[0]
    rs = s2.where(cond[:3], -s2)
    tm.assert_series_equal(rs, expected)


def test_where_error():
    # Invalid conditions (scalar, wrong-length ndarray) must raise;
    # also checks bool-list setitem length validation (GH 2745).
    s = Series(np.random.default_rng(2).standard_normal(5))
    cond = s > 0

    msg = "Array conditional must be same shape as self"
    with pytest.raises(ValueError, match=msg):
        s.where(1)
    with pytest.raises(ValueError, match=msg):
        s.where(cond[:3].values, -s)

    # GH 2745
    s = Series([1, 2])
    s[[True, False]] = [0, 1]
    expected = Series([0, 2])
    tm.assert_series_equal(s, expected)

    # failures
    msg = "cannot set using a list-like indexer with a different length than the value"
    with pytest.raises(ValueError, match=msg):
        s[[True, False]] = [0, 2, 3]

    with pytest.raises(ValueError, match=msg):
        s[[True, False]] = []


@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where_array_like(klass):
    # see gh-15414
    # Any boolean array-like is accepted as the condition.
    s = Series([1, 2, 3])
    cond = [False, True, True]
    expected = Series([np.nan, 2, 3])

    result = s.where(klass(cond))
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize(
    "cond",
    [
        [1, 0, 1],
        Series([2, 5, 7]),
        ["True", "False", "True"],
        [Timestamp("2017-01-01"), pd.NaT, Timestamp("2017-01-02")],
    ],
)
def test_where_invalid_input(cond):
    # see gh-15414: only boolean arrays accepted
    s = Series([1, 2, 3])
    msg = "Boolean array expected for the condition"

    with pytest.raises(ValueError, match=msg):
        s.where(cond)

    msg = "Array conditional must be same shape as self"
    with pytest.raises(ValueError, match=msg):
        s.where([True])


def test_where_ndframe_align():
    # Raw list/ndarray conditions must match the Series shape exactly,
    # while Series conditions are aligned (missing labels -> False).
    msg = "Array conditional must be same shape as self"
    s = Series([1, 2, 3])

    cond = [True]
    with pytest.raises(ValueError, match=msg):
        s.where(cond)

    expected = Series([1, np.nan, np.nan])

    out = s.where(Series(cond))
    tm.assert_series_equal(out, expected)

    cond = np.array([False, True, False, True])
    with pytest.raises(ValueError, match=msg):
        s.where(cond)

    expected = Series([np.nan, 2, np.nan])

    out = s.where(Series(cond))
    tm.assert_series_equal(out, expected)


def test_where_setitem_invalid():
    # GH 2702
    # make sure correct exceptions are raised on invalid list assignment

    # builds the expected error message for a given indexer kind
    msg = (
        lambda x: f"cannot set using a {x} indexer with a "
        "different length than the value"
    )
    # slice
    s = Series(list("abc"), dtype=object)

    with pytest.raises(ValueError, match=msg("slice")):
        s[0:3] = list(range(27))

    s[0:3] = list(range(3))
    expected = Series([0, 1, 2])
    tm.assert_series_equal(s.astype(np.int64), expected)

    # slice with step
    s = Series(list("abcdef"), dtype=object)

    with pytest.raises(ValueError, match=msg("slice")):
        s[0:4:2] = list(range(27))

    s = Series(list("abcdef"), dtype=object)
    s[0:4:2] = list(range(2))
    expected = Series([0, "b", 1, "d", "e", "f"])
    tm.assert_series_equal(s, expected)

    # neg slices
    s = Series(list("abcdef"), dtype=object)

    with pytest.raises(ValueError, match=msg("slice")):
        s[:-1] = list(range(27))

    s[-3:-1] = list(range(2))
    expected = Series(["a", "b", "c", 0, 1, "f"])
    tm.assert_series_equal(s, expected)

    # list
    s = Series(list("abc"), dtype=object)

    with pytest.raises(ValueError, match=msg("list-like")):
        s[[0, 1, 2]] = list(range(27))

    s = Series(list("abc"), dtype=object)

    with pytest.raises(ValueError, match=msg("list-like")):
        s[[0, 1, 2]] = list(range(2))

    # scalar
    s = Series(list("abc"), dtype=object)
    s[0] = list(range(10))
    expected = Series([list(range(10)), "b", "c"])
    tm.assert_series_equal(s, expected)


@pytest.mark.parametrize("size", range(2, 6))
@pytest.mark.parametrize(
    "mask", [[True, False, False, False, False], [True, False], [False]]
)
@pytest.mark.parametrize(
    "item", [2.0, np.nan, np.finfo(float).max, np.finfo(float).min]
)
# Test numpy arrays, lists and tuples as the input to be
# broadcast
@pytest.mark.parametrize(
    "box", [lambda x: np.array([x]), lambda x: [x], lambda x: (x,)]
)
def test_broadcast(size, mask, item, box):
    # GH#8801, GH#4195
    # A length-1 `other` is broadcast across all selected positions for
    # mask-setitem, where, and mask alike.
    selection = np.resize(mask, size)

    data = np.arange(size, dtype=float)

    # Construct the expected series by taking the source
    # data or item based on the selection
    expected = Series(
        [item if use_item else data[i] for i, use_item in enumerate(selection)]
    )

    s = Series(data)

    s[selection] = item
    tm.assert_series_equal(s, expected)

    s = Series(data)
    result = s.where(~selection, box(item))
    tm.assert_series_equal(result, expected)

    s = Series(data)
    result = s.mask(selection, box(item))
    tm.assert_series_equal(result, expected)


def test_where_inplace():
    # inplace=True must produce the same values as the returning form.
    s = Series(np.random.default_rng(2).standard_normal(5))
    cond = s > 0

    rs = s.copy()

    rs.where(cond, inplace=True)
    tm.assert_series_equal(rs.dropna(), s[cond])
    tm.assert_series_equal(rs, s.where(cond))

    rs = s.copy()
    rs.where(cond, -s, inplace=True)
    tm.assert_series_equal(rs, s.where(cond, -s))


def test_where_dups():
    # GH 4550
    # where crashes with dups in index
    s1 = Series(list(range(3)))
    s2 = Series(list(range(3)))
    comb = pd.concat([s1, s2])
    result = comb.where(comb < 2)
    expected = Series([0, 1, np.nan, 0, 1, np.nan], index=[0, 1, 2, 0, 1, 2])
    tm.assert_series_equal(result, expected)

    # GH 4548
    # inplace updating not working with dups
    comb[comb < 1] = 5
    expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
    tm.assert_series_equal(comb, expected)

    comb[comb < 2] += 10
    expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
    tm.assert_series_equal(comb, expected)


def test_where_numeric_with_string():
    # GH 9280
    # A string `other` forces object dtype but only replaces the masked
    # positions; unmasked values stay integers.
    s = Series([1, 2, 3])
    w = s.where(s > 1, "X")

    assert not is_integer(w[0])
    assert is_integer(w[1])
    assert is_integer(w[2])
    assert isinstance(w[0], str)
    assert w.dtype == "object"

    w = s.where(s > 1, ["X", "Y", "Z"])
    assert not is_integer(w[0])
    assert is_integer(w[1])
    assert is_integer(w[2])
    assert isinstance(w[0], str)
    assert w.dtype == "object"

    w = s.where(s > 1, np.array(["X", "Y", "Z"]))
    assert not is_integer(w[0])
    assert is_integer(w[1])
    assert is_integer(w[2])
    assert isinstance(w[0], str)
    assert w.dtype == "object"


@pytest.mark.parametrize("dtype", ["timedelta64[ns]", "datetime64[ns]"])
def test_where_datetimelike_coerce(dtype):
    # Replacing every element of a datetimelike Series with integers is
    # downcast to int64 — deprecated, so a FutureWarning fires; when the
    # replacement contains NaN, the result stays object with no warning.
    ser = Series([1, 2], dtype=dtype)
    expected = Series([10, 10])
    mask = np.array([False, False])

    msg = "Downcasting behavior in Series and DataFrame methods 'where'"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        rs = ser.where(mask, [10, 10])
    tm.assert_series_equal(rs, expected)

    with tm.assert_produces_warning(FutureWarning, match=msg):
        rs = ser.where(mask, 10)
    tm.assert_series_equal(rs, expected)

    with tm.assert_produces_warning(FutureWarning, match=msg):
        rs = ser.where(mask, 10.0)
    tm.assert_series_equal(rs, expected)

    with tm.assert_produces_warning(FutureWarning, match=msg):
        rs = ser.where(mask, [10.0, 10.0])
    tm.assert_series_equal(rs, expected)

    rs = ser.where(mask, [10.0, np.nan])
    expected = Series([10, np.nan], dtype="object")
    tm.assert_series_equal(rs, expected)


def test_where_datetimetz():
    # GH 15701
    # tz-aware dtype survives where; unmatched positions become NaT.
    timestamps = ["2016-12-31 12:00:04+00:00", "2016-12-31 12:00:04.010000+00:00"]
    ser = Series([Timestamp(t) for t in timestamps], dtype="datetime64[ns, UTC]")
    rs = ser.where(Series([False, True]))
    expected = Series([pd.NaT, ser[1]], dtype="datetime64[ns, UTC]")
    tm.assert_series_equal(rs, expected)


def test_where_sparse():
    # GH#17198 make sure we dont get an AttributeError for sp_index
    ser = Series(pd.arrays.SparseArray([1, 2]))
    result = ser.where(ser >= 2, 0)
    expected = Series(pd.arrays.SparseArray([0, 2]))
    tm.assert_series_equal(result, expected)


def test_where_empty_series_and_empty_cond_having_non_bool_dtypes():
    # https://github.com/pandas-dev/pandas/issues/34592
    # An empty (hence non-bool-dtyped) condition on an empty Series is a no-op.
    ser = Series([], dtype=float)
    result = ser.where([])
    tm.assert_series_equal(result, ser)


def test_where_categorical(frame_or_series):
    # https://github.com/pandas-dev/pandas/issues/18888
    # where on categorical data keeps the dtype; masked-out values -> NaN.
    exp = frame_or_series(
        pd.Categorical(["A", "A", "B", "B", np.nan], categories=["A", "B", "C"]),
        dtype="category",
    )
    df = frame_or_series(["A", "A", "B", "B", "C"], dtype="category")
    res = df.where(df != "C")
    tm.assert_equal(exp, res)


def test_where_datetimelike_categorical(tz_naive_fixture):
    # GH#37682
    # A Categorical `other` holding datetimes works across Index, array,
    # Series, and DataFrame variants of where.
    tz = tz_naive_fixture

    dr = date_range("2001-01-01", periods=3, tz=tz)._with_freq(None)
    lvals = pd.DatetimeIndex([dr[0], dr[1], pd.NaT])
    rvals = pd.Categorical([dr[0], pd.NaT, dr[2]])

    mask = np.array([True, True, False])

    # DatetimeIndex.where
    res = lvals.where(mask, rvals)
    tm.assert_index_equal(res, dr)

    # DatetimeArray.where
    res = lvals._data._where(mask, rvals)
    tm.assert_datetime_array_equal(res, dr._data)

    # Series.where
    res = Series(lvals).where(mask, rvals)
    tm.assert_series_equal(res, Series(dr))

    # DataFrame.where
    res = pd.DataFrame(lvals).where(mask[:, None], pd.DataFrame(rvals))

    tm.assert_frame_equal(res, pd.DataFrame(dr))
import numpy as np\nimport pytest\n\nfrom pandas import (\n MultiIndex,\n Series,\n date_range,\n)\nimport pandas._testing as tm\n\n\ndef test_xs_datetimelike_wrapping():\n # GH#31630 a case where we shouldn't wrap datetime64 in Timestamp\n arr = date_range("2016-01-01", periods=3)._data._ndarray\n\n ser = Series(arr, dtype=object)\n for i in range(len(ser)):\n ser.iloc[i] = arr[i]\n assert ser.dtype == object\n assert isinstance(ser[0], np.datetime64)\n\n result = ser.xs(0)\n assert isinstance(result, np.datetime64)\n\n\nclass TestXSWithMultiIndex:\n def test_xs_level_series(self, multiindex_dataframe_random_data):\n df = multiindex_dataframe_random_data\n ser = df["A"]\n expected = ser[:, "two"]\n result = df.xs("two", level=1)["A"]\n tm.assert_series_equal(result, expected)\n\n def test_series_getitem_multiindex_xs_by_label(self):\n # GH#5684\n idx = MultiIndex.from_tuples(\n [("a", "one"), ("a", "two"), ("b", "one"), ("b", "two")]\n )\n ser = Series([1, 2, 3, 4], index=idx)\n return_value = ser.index.set_names(["L1", "L2"], inplace=True)\n assert return_value is None\n expected = Series([1, 3], index=["a", "b"])\n return_value = expected.index.set_names(["L1"], inplace=True)\n assert return_value is None\n\n result = ser.xs("one", level="L2")\n tm.assert_series_equal(result, expected)\n\n def test_series_getitem_multiindex_xs(self):\n # GH#6258\n dt = list(date_range("20130903", periods=3))\n idx = MultiIndex.from_product([list("AB"), dt])\n ser = Series([1, 3, 4, 1, 3, 4], index=idx)\n expected = Series([1, 1], index=list("AB"))\n\n result = ser.xs("20130903", level=1)\n tm.assert_series_equal(result, expected)\n\n def test_series_xs_droplevel_false(self):\n # GH: 19056\n mi = MultiIndex.from_tuples(\n [("a", "x"), ("a", "y"), ("b", "x")], names=["level1", "level2"]\n )\n ser = Series([1, 1, 1], index=mi)\n result = ser.xs("a", axis=0, drop_level=False)\n expected = Series(\n [1, 1],\n index=MultiIndex.from_tuples(\n [("a", "x"), ("a", "y")], names=["level1", 
"level2"]\n ),\n )\n tm.assert_series_equal(result, expected)\n\n def test_xs_key_as_list(self):\n # GH#41760\n mi = MultiIndex.from_tuples([("a", "x")], names=["level1", "level2"])\n ser = Series([1], index=mi)\n with pytest.raises(TypeError, match="list keys are not supported"):\n ser.xs(["a", "x"], axis=0, drop_level=False)\n\n with pytest.raises(TypeError, match="list keys are not supported"):\n ser.xs(["a"], axis=0, drop_level=False)\n | .venv\Lib\site-packages\pandas\tests\series\indexing\test_xs.py | test_xs.py | Python | 2,760 | 0.95 | 0.097561 | 0.073529 | vue-tools | 432 | 2023-11-01T00:16:29.690868 | GPL-3.0 | true | c7d6a1b89e047a918a2a9edb686fb4ec |
\n\n | .venv\Lib\site-packages\pandas\tests\series\indexing\__pycache__\test_datetime.cpython-313.pyc | test_datetime.cpython-313.pyc | Other | 23,125 | 0.8 | 0.003436 | 0.006993 | awesome-app | 842 | 2024-06-22T10:30:38.676991 | Apache-2.0 | true | db39cb6a71e5b7e81147ec8621ce1655 |
\n\n | .venv\Lib\site-packages\pandas\tests\series\indexing\__pycache__\test_delitem.cpython-313.pyc | test_delitem.cpython-313.pyc | Other | 3,470 | 0.8 | 0 | 0 | react-lib | 68 | 2024-10-06T13:20:54.742180 | MIT | true | 8ad7afced53c23c19966cea306645e6c |
\n\n | .venv\Lib\site-packages\pandas\tests\series\indexing\__pycache__\test_get.cpython-313.pyc | test_get.cpython-313.pyc | Other | 8,310 | 0.8 | 0 | 0.022989 | vue-tools | 915 | 2024-06-07T21:58:00.941053 | GPL-3.0 | true | 84f3dd83706e8972e2adda3b089b1ac7 |
\n\n | .venv\Lib\site-packages\pandas\tests\series\indexing\__pycache__\test_getitem.cpython-313.pyc | test_getitem.cpython-313.pyc | Other | 42,289 | 0.8 | 0.003839 | 0.021277 | node-utils | 428 | 2025-05-06T01:26:12.135571 | Apache-2.0 | true | b3e2a4323dc669e4298eb3d4887d193b |
\n\n | .venv\Lib\site-packages\pandas\tests\series\indexing\__pycache__\test_indexing.cpython-313.pyc | test_indexing.cpython-313.pyc | Other | 29,829 | 0.8 | 0.005917 | 0.009063 | vue-tools | 614 | 2024-07-31T17:29:17.935975 | MIT | true | b1240a1dd83a8ea628b056aae04c030a |
\n\n | .venv\Lib\site-packages\pandas\tests\series\indexing\__pycache__\test_mask.cpython-313.pyc | test_mask.cpython-313.pyc | Other | 3,989 | 0.8 | 0 | 0 | awesome-app | 583 | 2025-03-16T20:02:49.315344 | GPL-3.0 | true | f0e08fdbd700597636efa856073df113 |
\n\n | .venv\Lib\site-packages\pandas\tests\series\indexing\__pycache__\test_setitem.cpython-313.pyc | test_setitem.cpython-313.pyc | Other | 97,147 | 0.6 | 0.005464 | 0.006682 | node-utils | 533 | 2025-06-11T11:03:24.450692 | Apache-2.0 | true | 416ce32c57c7004828bcdd1cbb2f302d |
\n\n | .venv\Lib\site-packages\pandas\tests\series\indexing\__pycache__\test_set_value.cpython-313.pyc | test_set_value.cpython-313.pyc | Other | 1,975 | 0.8 | 0 | 0.041667 | node-utils | 707 | 2024-11-21T00:46:50.648750 | MIT | true | 56b6e1ee43f75c53c10bdcf6c1e1147e |
\n\n | .venv\Lib\site-packages\pandas\tests\series\indexing\__pycache__\test_take.cpython-313.pyc | test_take.cpython-313.pyc | Other | 2,931 | 0.95 | 0.019231 | 0 | node-utils | 590 | 2024-12-30T04:41:09.033023 | MIT | true | 1aa6214a7a69ba0f4b923f8b373161d4 |
\n\n | .venv\Lib\site-packages\pandas\tests\series\indexing\__pycache__\test_where.cpython-313.pyc | test_where.cpython-313.pyc | Other | 25,326 | 0.8 | 0.003521 | 0.02518 | awesome-app | 310 | 2025-02-16T19:00:47.749305 | Apache-2.0 | true | 8d1c1943da9b68fdaa9188ddf156628b |
\n\n | .venv\Lib\site-packages\pandas\tests\series\indexing\__pycache__\test_xs.cpython-313.pyc | test_xs.cpython-313.pyc | Other | 4,950 | 0.8 | 0 | 0.073529 | awesome-app | 908 | 2023-10-04T04:07:17.169107 | MIT | true | dac58083f1d8e7632055bf6e548814c4 |
\n\n | .venv\Lib\site-packages\pandas\tests\series\indexing\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 203 | 0.7 | 0 | 0 | react-lib | 785 | 2025-05-29T21:53:03.228247 | BSD-3-Clause | true | 980c9aeb653690ffb7858367bed01e52 |
import pytest\n\nfrom pandas import Index\nimport pandas._testing as tm\n\n\ndef test_add_prefix_suffix(string_series):\n with_prefix = string_series.add_prefix("foo#")\n expected = Index([f"foo#{c}" for c in string_series.index])\n tm.assert_index_equal(with_prefix.index, expected)\n\n with_suffix = string_series.add_suffix("#foo")\n expected = Index([f"{c}#foo" for c in string_series.index])\n tm.assert_index_equal(with_suffix.index, expected)\n\n with_pct_prefix = string_series.add_prefix("%")\n expected = Index([f"%{c}" for c in string_series.index])\n tm.assert_index_equal(with_pct_prefix.index, expected)\n\n with_pct_suffix = string_series.add_suffix("%")\n expected = Index([f"{c}%" for c in string_series.index])\n tm.assert_index_equal(with_pct_suffix.index, expected)\n\n\ndef test_add_prefix_suffix_axis(string_series):\n # GH 47819\n with_prefix = string_series.add_prefix("foo#", axis=0)\n expected = Index([f"foo#{c}" for c in string_series.index])\n tm.assert_index_equal(with_prefix.index, expected)\n\n with_pct_suffix = string_series.add_suffix("#foo", axis=0)\n expected = Index([f"{c}#foo" for c in string_series.index])\n tm.assert_index_equal(with_pct_suffix.index, expected)\n\n\ndef test_add_prefix_suffix_invalid_axis(string_series):\n with pytest.raises(ValueError, match="No axis named 1 for object type Series"):\n string_series.add_prefix("foo#", axis=1)\n\n with pytest.raises(ValueError, match="No axis named 1 for object type Series"):\n string_series.add_suffix("foo#", axis=1)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_add_prefix_suffix.py | test_add_prefix_suffix.py | Python | 1,556 | 0.95 | 0.268293 | 0.034483 | react-lib | 503 | 2023-08-26T12:59:45.844653 | GPL-3.0 | true | 2be0b281ba7266ee32de8efbf968ceed |
from datetime import timezone\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n Series,\n date_range,\n period_range,\n)\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize(\n "first_slice,second_slice",\n [\n [[2, None], [None, -5]],\n [[None, 0], [None, -5]],\n [[None, -5], [None, 0]],\n [[None, 0], [None, 0]],\n ],\n)\n@pytest.mark.parametrize("fill", [None, -1])\ndef test_align(datetime_series, first_slice, second_slice, join_type, fill):\n a = datetime_series[slice(*first_slice)]\n b = datetime_series[slice(*second_slice)]\n\n aa, ab = a.align(b, join=join_type, fill_value=fill)\n\n join_index = a.index.join(b.index, how=join_type)\n if fill is not None:\n diff_a = aa.index.difference(join_index)\n diff_b = ab.index.difference(join_index)\n if len(diff_a) > 0:\n assert (aa.reindex(diff_a) == fill).all()\n if len(diff_b) > 0:\n assert (ab.reindex(diff_b) == fill).all()\n\n ea = a.reindex(join_index)\n eb = b.reindex(join_index)\n\n if fill is not None:\n ea = ea.fillna(fill)\n eb = eb.fillna(fill)\n\n tm.assert_series_equal(aa, ea)\n tm.assert_series_equal(ab, eb)\n assert aa.name == "ts"\n assert ea.name == "ts"\n assert ab.name == "ts"\n assert eb.name == "ts"\n\n\n@pytest.mark.parametrize(\n "first_slice,second_slice",\n [\n [[2, None], [None, -5]],\n [[None, 0], [None, -5]],\n [[None, -5], [None, 0]],\n [[None, 0], [None, 0]],\n ],\n)\n@pytest.mark.parametrize("method", ["pad", "bfill"])\n@pytest.mark.parametrize("limit", [None, 1])\ndef test_align_fill_method(\n datetime_series, first_slice, second_slice, join_type, method, limit\n):\n a = datetime_series[slice(*first_slice)]\n b = datetime_series[slice(*second_slice)]\n\n msg = (\n "The 'method', 'limit', and 'fill_axis' keywords in Series.align "\n "are deprecated"\n )\n with tm.assert_produces_warning(FutureWarning, match=msg):\n aa, ab = a.align(b, join=join_type, method=method, limit=limit)\n\n join_index = a.index.join(b.index, how=join_type)\n ea = 
a.reindex(join_index)\n eb = b.reindex(join_index)\n\n msg2 = "Series.fillna with 'method' is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg2):\n ea = ea.fillna(method=method, limit=limit)\n eb = eb.fillna(method=method, limit=limit)\n\n tm.assert_series_equal(aa, ea)\n tm.assert_series_equal(ab, eb)\n\n\ndef test_align_nocopy(datetime_series, using_copy_on_write):\n b = datetime_series[:5].copy()\n\n # do copy\n a = datetime_series.copy()\n ra, _ = a.align(b, join="left")\n ra[:5] = 5\n assert not (a[:5] == 5).any()\n\n # do not copy\n a = datetime_series.copy()\n ra, _ = a.align(b, join="left", copy=False)\n ra[:5] = 5\n if using_copy_on_write:\n assert not (a[:5] == 5).any()\n else:\n assert (a[:5] == 5).all()\n\n # do copy\n a = datetime_series.copy()\n b = datetime_series[:5].copy()\n _, rb = a.align(b, join="right")\n rb[:3] = 5\n assert not (b[:3] == 5).any()\n\n # do not copy\n a = datetime_series.copy()\n b = datetime_series[:5].copy()\n _, rb = a.align(b, join="right", copy=False)\n rb[:2] = 5\n if using_copy_on_write:\n assert not (b[:2] == 5).any()\n else:\n assert (b[:2] == 5).all()\n\n\ndef test_align_same_index(datetime_series, using_copy_on_write):\n a, b = datetime_series.align(datetime_series, copy=False)\n if not using_copy_on_write:\n assert a.index is datetime_series.index\n assert b.index is datetime_series.index\n else:\n assert a.index.is_(datetime_series.index)\n assert b.index.is_(datetime_series.index)\n\n a, b = datetime_series.align(datetime_series, copy=True)\n assert a.index is not datetime_series.index\n assert b.index is not datetime_series.index\n assert a.index.is_(datetime_series.index)\n assert b.index.is_(datetime_series.index)\n\n\ndef test_align_multiindex():\n # GH 10665\n\n midx = pd.MultiIndex.from_product(\n [range(2), range(3), range(2)], names=("a", "b", "c")\n )\n idx = pd.Index(range(2), name="b")\n s1 = Series(np.arange(12, dtype="int64"), index=midx)\n s2 = Series(np.arange(2, dtype="int64"), 
index=idx)\n\n # these must be the same results (but flipped)\n res1l, res1r = s1.align(s2, join="left")\n res2l, res2r = s2.align(s1, join="right")\n\n expl = s1\n tm.assert_series_equal(expl, res1l)\n tm.assert_series_equal(expl, res2r)\n expr = Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)\n tm.assert_series_equal(expr, res1r)\n tm.assert_series_equal(expr, res2l)\n\n res1l, res1r = s1.align(s2, join="right")\n res2l, res2r = s2.align(s1, join="left")\n\n exp_idx = pd.MultiIndex.from_product(\n [range(2), range(2), range(2)], names=("a", "b", "c")\n )\n expl = Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)\n tm.assert_series_equal(expl, res1l)\n tm.assert_series_equal(expl, res2r)\n expr = Series([0, 0, 1, 1] * 2, index=exp_idx)\n tm.assert_series_equal(expr, res1r)\n tm.assert_series_equal(expr, res2l)\n\n\n@pytest.mark.parametrize("method", ["backfill", "bfill", "pad", "ffill", None])\ndef test_align_with_dataframe_method(method):\n # GH31788\n ser = Series(range(3), index=range(3))\n df = pd.DataFrame(0.0, index=range(3), columns=range(3))\n\n msg = (\n "The 'method', 'limit', and 'fill_axis' keywords in Series.align "\n "are deprecated"\n )\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result_ser, result_df = ser.align(df, method=method)\n tm.assert_series_equal(result_ser, ser)\n tm.assert_frame_equal(result_df, df)\n\n\ndef test_align_dt64tzindex_mismatched_tzs():\n idx1 = date_range("2001", periods=5, freq="h", tz="US/Eastern")\n ser = Series(np.random.default_rng(2).standard_normal(len(idx1)), index=idx1)\n ser_central = ser.tz_convert("US/Central")\n # different timezones convert to UTC\n\n new1, new2 = ser.align(ser_central)\n assert new1.index.tz is timezone.utc\n assert new2.index.tz is timezone.utc\n\n\ndef test_align_periodindex(join_type):\n rng = period_range("1/1/2000", "1/1/2010", freq="Y")\n ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng)\n\n # TODO: assert something?\n ts.align(ts[::2], 
join=join_type)\n\n\ndef test_align_stringindex(any_string_dtype):\n left = Series(range(3), index=pd.Index(["a", "b", "d"], dtype=any_string_dtype))\n right = Series(range(3), index=pd.Index(["a", "b", "c"], dtype=any_string_dtype))\n result_left, result_right = left.align(right)\n\n expected_idx = pd.Index(["a", "b", "c", "d"], dtype=any_string_dtype)\n expected_left = Series([0, 1, np.nan, 2], index=expected_idx)\n expected_right = Series([0, 1, 2, np.nan], index=expected_idx)\n\n tm.assert_series_equal(result_left, expected_left)\n tm.assert_series_equal(result_right, expected_right)\n\n\ndef test_align_left_fewer_levels():\n # GH#45224\n left = Series([2], index=pd.MultiIndex.from_tuples([(1, 3)], names=["a", "c"]))\n right = Series(\n [1], index=pd.MultiIndex.from_tuples([(1, 2, 3)], names=["a", "b", "c"])\n )\n result_left, result_right = left.align(right)\n\n expected_right = Series(\n [1], index=pd.MultiIndex.from_tuples([(1, 3, 2)], names=["a", "c", "b"])\n )\n expected_left = Series(\n [2], index=pd.MultiIndex.from_tuples([(1, 3, 2)], names=["a", "c", "b"])\n )\n tm.assert_series_equal(result_left, expected_left)\n tm.assert_series_equal(result_right, expected_right)\n\n\ndef test_align_left_different_named_levels():\n # GH#45224\n left = Series(\n [2], index=pd.MultiIndex.from_tuples([(1, 4, 3)], names=["a", "d", "c"])\n )\n right = Series(\n [1], index=pd.MultiIndex.from_tuples([(1, 2, 3)], names=["a", "b", "c"])\n )\n result_left, result_right = left.align(right)\n\n expected_left = Series(\n [2], index=pd.MultiIndex.from_tuples([(1, 4, 3, 2)], names=["a", "d", "c", "b"])\n )\n expected_right = Series(\n [1], index=pd.MultiIndex.from_tuples([(1, 4, 3, 2)], names=["a", "d", "c", "b"])\n )\n tm.assert_series_equal(result_left, expected_left)\n tm.assert_series_equal(result_right, expected_right)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_align.py | test_align.py | Python | 8,290 | 0.95 | 0.068702 | 0.051887 | react-lib | 365 | 
2024-12-03T22:24:12.030453 | GPL-3.0 | true | 1083b5d5398acf2994aa84e7abb8c730 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n Series,\n Timestamp,\n isna,\n)\nimport pandas._testing as tm\n\n\nclass TestSeriesArgsort:\n def test_argsort_axis(self):\n # GH#54257\n ser = Series(range(3))\n\n msg = "No axis named 2 for object type Series"\n with pytest.raises(ValueError, match=msg):\n ser.argsort(axis=2)\n\n def test_argsort_numpy(self, datetime_series):\n ser = datetime_series\n\n res = np.argsort(ser).values\n expected = np.argsort(np.array(ser))\n tm.assert_numpy_array_equal(res, expected)\n\n # with missing values\n ts = ser.copy()\n ts[::2] = np.nan\n\n msg = "The behavior of Series.argsort in the presence of NA values"\n with tm.assert_produces_warning(\n FutureWarning, match=msg, check_stacklevel=False\n ):\n result = np.argsort(ts)[1::2]\n expected = np.argsort(np.array(ts.dropna()))\n\n tm.assert_numpy_array_equal(result.values, expected)\n\n def test_argsort(self, datetime_series):\n argsorted = datetime_series.argsort()\n assert issubclass(argsorted.dtype.type, np.integer)\n\n def test_argsort_dt64(self, unit):\n # GH#2967 (introduced bug in 0.11-dev I think)\n ser = Series(\n [Timestamp(f"201301{i:02d}") for i in range(1, 6)], dtype=f"M8[{unit}]"\n )\n assert ser.dtype == f"datetime64[{unit}]"\n shifted = ser.shift(-1)\n assert shifted.dtype == f"datetime64[{unit}]"\n assert isna(shifted[4])\n\n result = ser.argsort()\n expected = Series(range(5), dtype=np.intp)\n tm.assert_series_equal(result, expected)\n\n msg = "The behavior of Series.argsort in the presence of NA values"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = shifted.argsort()\n expected = Series(list(range(4)) + [-1], dtype=np.intp)\n tm.assert_series_equal(result, expected)\n\n def test_argsort_stable(self):\n ser = Series(np.random.default_rng(2).integers(0, 100, size=10000))\n mindexer = ser.argsort(kind="mergesort")\n qindexer = ser.argsort()\n\n mexpected = np.argsort(ser.values, kind="mergesort")\n qexpected = 
np.argsort(ser.values, kind="quicksort")\n\n tm.assert_series_equal(mindexer.astype(np.intp), Series(mexpected))\n tm.assert_series_equal(qindexer.astype(np.intp), Series(qexpected))\n msg = (\n r"ndarray Expected type <class 'numpy\.ndarray'>, "\n r"found <class 'pandas\.core\.series\.Series'> instead"\n )\n with pytest.raises(AssertionError, match=msg):\n tm.assert_numpy_array_equal(qindexer, mindexer)\n\n def test_argsort_preserve_name(self, datetime_series):\n result = datetime_series.argsort()\n assert result.name == datetime_series.name\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_argsort.py | test_argsort.py | Python | 2,871 | 0.95 | 0.130952 | 0.044776 | react-lib | 863 | 2025-01-14T20:10:59.119421 | MIT | true | 9e9e1887eb191b82ea6fa209a4901027 |
import numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs import IncompatibleFrequency\n\nfrom pandas import (\n DatetimeIndex,\n PeriodIndex,\n Series,\n Timestamp,\n date_range,\n isna,\n notna,\n offsets,\n period_range,\n)\nimport pandas._testing as tm\n\n\nclass TestSeriesAsof:\n def test_asof_nanosecond_index_access(self):\n ts = Timestamp("20130101").as_unit("ns")._value\n dti = DatetimeIndex([ts + 50 + i for i in range(100)])\n ser = Series(np.random.default_rng(2).standard_normal(100), index=dti)\n\n first_value = ser.asof(ser.index[0])\n\n # GH#46903 previously incorrectly was "day"\n assert dti.resolution == "nanosecond"\n\n # this used to not work bc parsing was done by dateutil that didn't\n # handle nanoseconds\n assert first_value == ser["2013-01-01 00:00:00.000000050"]\n\n expected_ts = np.datetime64("2013-01-01 00:00:00.000000050", "ns")\n assert first_value == ser[Timestamp(expected_ts)]\n\n def test_basic(self):\n # array or list or dates\n N = 50\n rng = date_range("1/1/1990", periods=N, freq="53s")\n ts = Series(np.random.default_rng(2).standard_normal(N), index=rng)\n ts.iloc[15:30] = np.nan\n dates = date_range("1/1/1990", periods=N * 3, freq="25s")\n\n result = ts.asof(dates)\n assert notna(result).all()\n lb = ts.index[14]\n ub = ts.index[30]\n\n result = ts.asof(list(dates))\n assert notna(result).all()\n lb = ts.index[14]\n ub = ts.index[30]\n\n mask = (result.index >= lb) & (result.index < ub)\n rs = result[mask]\n assert (rs == ts[lb]).all()\n\n val = result[result.index[result.index >= ub][0]]\n assert ts[ub] == val\n\n def test_scalar(self):\n N = 30\n rng = date_range("1/1/1990", periods=N, freq="53s")\n # Explicit cast to float avoid implicit cast when setting nan\n ts = Series(np.arange(N), index=rng, dtype="float")\n ts.iloc[5:10] = np.nan\n ts.iloc[15:20] = np.nan\n\n val1 = ts.asof(ts.index[7])\n val2 = ts.asof(ts.index[19])\n\n assert val1 == ts.iloc[4]\n assert val2 == ts.iloc[14]\n\n # accepts strings\n val1 = 
ts.asof(str(ts.index[7]))\n assert val1 == ts.iloc[4]\n\n # in there\n result = ts.asof(ts.index[3])\n assert result == ts.iloc[3]\n\n # no as of value\n d = ts.index[0] - offsets.BDay()\n assert np.isnan(ts.asof(d))\n\n def test_with_nan(self):\n # basic asof test\n rng = date_range("1/1/2000", "1/2/2000", freq="4h")\n s = Series(np.arange(len(rng)), index=rng)\n r = s.resample("2h").mean()\n\n result = r.asof(r.index)\n expected = Series(\n [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6.0],\n index=date_range("1/1/2000", "1/2/2000", freq="2h"),\n )\n tm.assert_series_equal(result, expected)\n\n r.iloc[3:5] = np.nan\n result = r.asof(r.index)\n expected = Series(\n [0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 5, 5, 6.0],\n index=date_range("1/1/2000", "1/2/2000", freq="2h"),\n )\n tm.assert_series_equal(result, expected)\n\n r.iloc[-3:] = np.nan\n result = r.asof(r.index)\n expected = Series(\n [0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 4, 4, 4.0],\n index=date_range("1/1/2000", "1/2/2000", freq="2h"),\n )\n tm.assert_series_equal(result, expected)\n\n def test_periodindex(self):\n # array or list or dates\n N = 50\n rng = period_range("1/1/1990", periods=N, freq="h")\n ts = Series(np.random.default_rng(2).standard_normal(N), index=rng)\n ts.iloc[15:30] = np.nan\n dates = date_range("1/1/1990", periods=N * 3, freq="37min")\n\n result = ts.asof(dates)\n assert notna(result).all()\n lb = ts.index[14]\n ub = ts.index[30]\n\n result = ts.asof(list(dates))\n assert notna(result).all()\n lb = ts.index[14]\n ub = ts.index[30]\n\n pix = PeriodIndex(result.index.values, freq="h")\n mask = (pix >= lb) & (pix < ub)\n rs = result[mask]\n assert (rs == ts[lb]).all()\n\n ts.iloc[5:10] = np.nan\n ts.iloc[15:20] = np.nan\n\n val1 = ts.asof(ts.index[7])\n val2 = ts.asof(ts.index[19])\n\n assert val1 == ts.iloc[4]\n assert val2 == ts.iloc[14]\n\n # accepts strings\n val1 = ts.asof(str(ts.index[7]))\n assert val1 == ts.iloc[4]\n\n # in there\n assert ts.asof(ts.index[3]) == ts.iloc[3]\n\n # no as of value\n d = 
ts.index[0].to_timestamp() - offsets.BDay()\n assert isna(ts.asof(d))\n\n # Mismatched freq\n msg = "Input has different freq"\n with pytest.raises(IncompatibleFrequency, match=msg):\n ts.asof(rng.asfreq("D"))\n\n def test_errors(self):\n s = Series(\n [1, 2, 3],\n index=[Timestamp("20130101"), Timestamp("20130103"), Timestamp("20130102")],\n )\n\n # non-monotonic\n assert not s.index.is_monotonic_increasing\n with pytest.raises(ValueError, match="requires a sorted index"):\n s.asof(s.index[0])\n\n # subset with Series\n N = 10\n rng = date_range("1/1/1990", periods=N, freq="53s")\n s = Series(np.random.default_rng(2).standard_normal(N), index=rng)\n with pytest.raises(ValueError, match="not valid for Series"):\n s.asof(s.index[0], subset="foo")\n\n def test_all_nans(self):\n # GH 15713\n # series is all nans\n\n # testing non-default indexes\n N = 50\n rng = date_range("1/1/1990", periods=N, freq="53s")\n\n dates = date_range("1/1/1990", periods=N * 3, freq="25s")\n result = Series(np.nan, index=rng).asof(dates)\n expected = Series(np.nan, index=dates)\n tm.assert_series_equal(result, expected)\n\n # testing scalar input\n date = date_range("1/1/1990", periods=N * 3, freq="25s")[0]\n result = Series(np.nan, index=rng).asof(date)\n assert isna(result)\n\n # test name is propagated\n result = Series(np.nan, index=[1, 2, 3, 4], name="test").asof([4, 5])\n expected = Series(np.nan, index=[4, 5], name="test")\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_asof.py | test_asof.py | Python | 6,324 | 0.95 | 0.04878 | 0.128834 | node-utils | 765 | 2024-09-18T04:08:41.748050 | Apache-2.0 | true | 17750bcb7b9833f749885c158ae0d519 |
from datetime import (\n datetime,\n timedelta,\n)\nfrom importlib import reload\nimport string\nimport sys\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs import iNaT\nimport pandas.util._test_decorators as td\n\nfrom pandas import (\n NA,\n Categorical,\n CategoricalDtype,\n DatetimeTZDtype,\n Index,\n Interval,\n NaT,\n Series,\n Timedelta,\n Timestamp,\n cut,\n date_range,\n to_datetime,\n)\nimport pandas._testing as tm\n\n\ndef rand_str(nchars: int) -> str:\n """\n Generate one random byte string.\n """\n RANDS_CHARS = np.array(\n list(string.ascii_letters + string.digits), dtype=(np.str_, 1)\n )\n return "".join(np.random.default_rng(2).choice(RANDS_CHARS, nchars))\n\n\nclass TestAstypeAPI:\n def test_astype_unitless_dt64_raises(self):\n # GH#47844\n ser = Series(["1970-01-01", "1970-01-01", "1970-01-01"], dtype="datetime64[ns]")\n df = ser.to_frame()\n\n msg = "Casting to unit-less dtype 'datetime64' is not supported"\n with pytest.raises(TypeError, match=msg):\n ser.astype(np.datetime64)\n with pytest.raises(TypeError, match=msg):\n df.astype(np.datetime64)\n with pytest.raises(TypeError, match=msg):\n ser.astype("datetime64")\n with pytest.raises(TypeError, match=msg):\n df.astype("datetime64")\n\n def test_arg_for_errors_in_astype(self):\n # see GH#14878\n ser = Series([1, 2, 3])\n\n msg = (\n r"Expected value of kwarg 'errors' to be one of \['raise', "\n r"'ignore'\]\. 
Supplied value is 'False'"\n )\n with pytest.raises(ValueError, match=msg):\n ser.astype(np.float64, errors=False)\n\n ser.astype(np.int8, errors="raise")\n\n @pytest.mark.parametrize("dtype_class", [dict, Series])\n def test_astype_dict_like(self, dtype_class):\n # see GH#7271\n ser = Series(range(0, 10, 2), name="abc")\n\n dt1 = dtype_class({"abc": str})\n result = ser.astype(dt1)\n expected = Series(["0", "2", "4", "6", "8"], name="abc", dtype="str")\n tm.assert_series_equal(result, expected)\n\n dt2 = dtype_class({"abc": "float64"})\n result = ser.astype(dt2)\n expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc")\n tm.assert_series_equal(result, expected)\n\n dt3 = dtype_class({"abc": str, "def": str})\n msg = (\n "Only the Series name can be used for the key in Series dtype "\n r"mappings\."\n )\n with pytest.raises(KeyError, match=msg):\n ser.astype(dt3)\n\n dt4 = dtype_class({0: str})\n with pytest.raises(KeyError, match=msg):\n ser.astype(dt4)\n\n # GH#16717\n # if dtypes provided is empty, it should error\n if dtype_class is Series:\n dt5 = dtype_class({}, dtype=object)\n else:\n dt5 = dtype_class({})\n\n with pytest.raises(KeyError, match=msg):\n ser.astype(dt5)\n\n\nclass TestAstype:\n @pytest.mark.parametrize("tz", [None, "UTC", "US/Pacific"])\n def test_astype_object_to_dt64_non_nano(self, tz):\n # GH#55756, GH#54620\n ts = Timestamp("2999-01-01")\n dtype = "M8[us]"\n if tz is not None:\n dtype = f"M8[us, {tz}]"\n vals = [ts, "2999-01-02 03:04:05.678910", 2500]\n ser = Series(vals, dtype=object)\n result = ser.astype(dtype)\n\n # The 2500 is interpreted as microseconds, consistent with what\n # we would get if we created DatetimeIndexes from vals[:2] and vals[2:]\n # and concated the results.\n pointwise = [\n vals[0].tz_localize(tz),\n Timestamp(vals[1], tz=tz),\n to_datetime(vals[2], unit="us", utc=True).tz_convert(tz),\n ]\n exp_vals = [x.as_unit("us").asm8 for x in pointwise]\n exp_arr = np.array(exp_vals, dtype="M8[us]")\n 
expected = Series(exp_arr, dtype="M8[us]")\n if tz is not None:\n expected = expected.dt.tz_localize("UTC").dt.tz_convert(tz)\n tm.assert_series_equal(result, expected)\n\n def test_astype_mixed_object_to_dt64tz(self):\n # pre-2.0 this raised ValueError bc of tz mismatch\n # xref GH#32581\n ts = Timestamp("2016-01-04 05:06:07", tz="US/Pacific")\n ts2 = ts.tz_convert("Asia/Tokyo")\n\n ser = Series([ts, ts2], dtype=object)\n res = ser.astype("datetime64[ns, Europe/Brussels]")\n expected = Series(\n [ts.tz_convert("Europe/Brussels"), ts2.tz_convert("Europe/Brussels")],\n dtype="datetime64[ns, Europe/Brussels]",\n )\n tm.assert_series_equal(res, expected)\n\n @pytest.mark.parametrize("dtype", np.typecodes["All"])\n def test_astype_empty_constructor_equality(self, dtype):\n # see GH#15524\n\n if dtype not in (\n "S",\n "V", # poor support (if any) currently\n "M",\n "m", # Generic timestamps raise a ValueError. Already tested.\n ):\n init_empty = Series([], dtype=dtype)\n as_type_empty = Series([]).astype(dtype)\n tm.assert_series_equal(init_empty, as_type_empty)\n\n @pytest.mark.parametrize("dtype", [str, np.str_])\n @pytest.mark.parametrize(\n "series",\n [\n Series([string.digits * 10, rand_str(63), rand_str(64), rand_str(1000)]),\n Series([string.digits * 10, rand_str(63), rand_str(64), np.nan, 1.0]),\n ],\n )\n def test_astype_str_map(self, dtype, series, using_infer_string):\n # see GH#4405\n using_string_dtype = using_infer_string and dtype is str\n result = series.astype(dtype)\n if using_string_dtype:\n expected = series.map(lambda val: str(val) if val is not np.nan else np.nan)\n else:\n expected = series.map(str)\n if using_infer_string:\n expected = expected.astype(object)\n tm.assert_series_equal(result, expected)\n\n def test_astype_float_to_period(self):\n result = Series([np.nan]).astype("period[D]")\n expected = Series([NaT], dtype="period[D]")\n tm.assert_series_equal(result, expected)\n\n def test_astype_no_pandas_dtype(self):\n # 
https://github.com/pandas-dev/pandas/pull/24866\n ser = Series([1, 2], dtype="int64")\n # Don't have NumpyEADtype in the public API, so we use `.array.dtype`,\n # which is a NumpyEADtype.\n result = ser.astype(ser.array.dtype)\n tm.assert_series_equal(result, ser)\n\n @pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64])\n def test_astype_generic_timestamp_no_frequency(self, dtype, request):\n # see GH#15524, GH#15987\n data = [1]\n ser = Series(data)\n\n if np.dtype(dtype).name not in ["timedelta64", "datetime64"]:\n mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit")\n request.applymarker(mark)\n\n msg = (\n rf"The '{dtype.__name__}' dtype has no unit\. "\n rf"Please pass in '{dtype.__name__}\[ns\]' instead."\n )\n with pytest.raises(ValueError, match=msg):\n ser.astype(dtype)\n\n def test_astype_dt64_to_str(self):\n # GH#10442 : testing astype(str) is correct for Series/DatetimeIndex\n dti = date_range("2012-01-01", periods=3)\n result = Series(dti).astype(str)\n expected = Series(["2012-01-01", "2012-01-02", "2012-01-03"], dtype="str")\n tm.assert_series_equal(result, expected)\n\n def test_astype_dt64tz_to_str(self):\n # GH#10442 : testing astype(str) is correct for Series/DatetimeIndex\n dti_tz = date_range("2012-01-01", periods=3, tz="US/Eastern")\n result = Series(dti_tz).astype(str)\n expected = Series(\n [\n "2012-01-01 00:00:00-05:00",\n "2012-01-02 00:00:00-05:00",\n "2012-01-03 00:00:00-05:00",\n ],\n dtype="str",\n )\n tm.assert_series_equal(result, expected)\n\n def test_astype_datetime(self, unit):\n ser = Series(iNaT, dtype=f"M8[{unit}]", index=range(5))\n\n ser = ser.astype("O")\n assert ser.dtype == np.object_\n\n ser = Series([datetime(2001, 1, 2, 0, 0)])\n\n ser = ser.astype("O")\n assert ser.dtype == np.object_\n\n ser = Series(\n [datetime(2001, 1, 2, 0, 0) for i in range(3)], dtype=f"M8[{unit}]"\n )\n\n ser[1] = np.nan\n assert ser.dtype == f"M8[{unit}]"\n\n ser = ser.astype("O")\n assert ser.dtype == np.object_\n\n 
def test_astype_datetime64tz(self):\n ser = Series(date_range("20130101", periods=3, tz="US/Eastern"))\n\n # astype\n result = ser.astype(object)\n expected = Series(ser.astype(object), dtype=object)\n tm.assert_series_equal(result, expected)\n\n result = Series(ser.values).dt.tz_localize("UTC").dt.tz_convert(ser.dt.tz)\n tm.assert_series_equal(result, ser)\n\n # astype - object, preserves on construction\n result = Series(ser.astype(object))\n expected = ser.astype(object)\n tm.assert_series_equal(result, expected)\n\n # astype - datetime64[ns, tz]\n msg = "Cannot use .astype to convert from timezone-naive"\n with pytest.raises(TypeError, match=msg):\n # dt64->dt64tz astype deprecated\n Series(ser.values).astype("datetime64[ns, US/Eastern]")\n\n with pytest.raises(TypeError, match=msg):\n # dt64->dt64tz astype deprecated\n Series(ser.values).astype(ser.dtype)\n\n result = ser.astype("datetime64[ns, CET]")\n expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET"))\n tm.assert_series_equal(result, expected)\n\n def test_astype_str_cast_dt64(self):\n # see GH#9757\n ts = Series([Timestamp("2010-01-04 00:00:00")])\n res = ts.astype(str)\n\n expected = Series(["2010-01-04"], dtype="str")\n tm.assert_series_equal(res, expected)\n\n ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")])\n res = ts.astype(str)\n\n expected = Series(["2010-01-04 00:00:00-05:00"], dtype="str")\n tm.assert_series_equal(res, expected)\n\n def test_astype_str_cast_td64(self):\n # see GH#9757\n\n td = Series([Timedelta(1, unit="d")])\n ser = td.astype(str)\n\n expected = Series(["1 days"], dtype="str")\n tm.assert_series_equal(ser, expected)\n\n def test_dt64_series_astype_object(self):\n dt64ser = Series(date_range("20130101", periods=3))\n result = dt64ser.astype(object)\n assert isinstance(result.iloc[0], datetime)\n assert result.dtype == np.object_\n\n def test_td64_series_astype_object(self):\n tdser = Series(["59 Days", "59 Days", "NaT"], 
dtype="timedelta64[ns]")\n result = tdser.astype(object)\n assert isinstance(result.iloc[0], timedelta)\n assert result.dtype == np.object_\n\n @pytest.mark.parametrize(\n "data, dtype",\n [\n (["x", "y", "z"], "string[python]"),\n pytest.param(\n ["x", "y", "z"],\n "string[pyarrow]",\n marks=td.skip_if_no("pyarrow"),\n ),\n (["x", "y", "z"], "category"),\n (3 * [Timestamp("2020-01-01", tz="UTC")], None),\n (3 * [Interval(0, 1)], None),\n ],\n )\n @pytest.mark.parametrize("errors", ["raise", "ignore"])\n def test_astype_ignores_errors_for_extension_dtypes(self, data, dtype, errors):\n # https://github.com/pandas-dev/pandas/issues/35471\n ser = Series(data, dtype=dtype)\n if errors == "ignore":\n expected = ser\n result = ser.astype(float, errors="ignore")\n tm.assert_series_equal(result, expected)\n else:\n msg = "(Cannot cast)|(could not convert)"\n with pytest.raises((ValueError, TypeError), match=msg):\n ser.astype(float, errors=errors)\n\n @pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])\n def test_astype_from_float_to_str(self, dtype):\n # https://github.com/pandas-dev/pandas/issues/36451\n ser = Series([0.1], dtype=dtype)\n result = ser.astype(str)\n expected = Series(["0.1"], dtype="str")\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "value, string_value",\n [\n (None, "None"),\n (np.nan, "nan"),\n (NA, "<NA>"),\n ],\n )\n def test_astype_to_str_preserves_na(self, value, string_value, using_infer_string):\n # https://github.com/pandas-dev/pandas/issues/36904\n ser = Series(["a", "b", value], dtype=object)\n result = ser.astype(str)\n expected = Series(\n ["a", "b", None if using_infer_string else string_value], dtype="str"\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("dtype", ["float32", "float64", "int64", "int32"])\n def test_astype(self, dtype):\n ser = Series(np.random.default_rng(2).standard_normal(5), name="foo")\n as_typed = ser.astype(dtype)\n\n assert as_typed.dtype 
== dtype\n assert as_typed.name == ser.name\n\n @pytest.mark.parametrize("value", [np.nan, np.inf])\n @pytest.mark.parametrize("dtype", [np.int32, np.int64])\n def test_astype_cast_nan_inf_int(self, dtype, value):\n # gh-14265: check NaN and inf raise error when converting to int\n msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"\n ser = Series([value])\n\n with pytest.raises(ValueError, match=msg):\n ser.astype(dtype)\n\n @pytest.mark.parametrize("dtype", [int, np.int8, np.int64])\n def test_astype_cast_object_int_fail(self, dtype):\n arr = Series(["car", "house", "tree", "1"])\n msg = r"invalid literal for int\(\) with base 10: 'car'"\n with pytest.raises(ValueError, match=msg):\n arr.astype(dtype)\n\n def test_astype_float_to_uint_negatives_raise(\n self, float_numpy_dtype, any_unsigned_int_numpy_dtype\n ):\n # GH#45151 We don't cast negative numbers to nonsense values\n # TODO: same for EA float/uint dtypes, signed integers?\n arr = np.arange(5).astype(float_numpy_dtype) - 3 # includes negatives\n ser = Series(arr)\n\n msg = "Cannot losslessly cast from .* to .*"\n with pytest.raises(ValueError, match=msg):\n ser.astype(any_unsigned_int_numpy_dtype)\n\n with pytest.raises(ValueError, match=msg):\n ser.to_frame().astype(any_unsigned_int_numpy_dtype)\n\n with pytest.raises(ValueError, match=msg):\n # We currently catch and re-raise in Index.astype\n Index(ser).astype(any_unsigned_int_numpy_dtype)\n\n with pytest.raises(ValueError, match=msg):\n ser.array.astype(any_unsigned_int_numpy_dtype)\n\n def test_astype_cast_object_int(self):\n arr = Series(["1", "2", "3", "4"], dtype=object)\n result = arr.astype(int)\n\n tm.assert_series_equal(result, Series(np.arange(1, 5)))\n\n def test_astype_unicode(self, using_infer_string):\n # see GH#7758: A bit of magic is required to set\n # default encoding to utf-8\n digits = string.digits\n test_series = [\n Series([digits * 10, rand_str(63), rand_str(64), rand_str(1000)]),\n 
Series(["データーサイエンス、お前はもう死んでいる"]),\n ]\n\n former_encoding = None\n\n if sys.getdefaultencoding() == "utf-8":\n # GH#45326 as of 2.0 Series.astype matches Index.astype by handling\n # bytes with obj.decode() instead of str(obj)\n item = "野菜食べないとやばい"\n ser = Series([item.encode()])\n result = ser.astype(np.str_)\n expected = Series([item], dtype=object)\n tm.assert_series_equal(result, expected)\n\n for ser in test_series:\n res = ser.astype(np.str_)\n expec = ser.map(str)\n if using_infer_string:\n expec = expec.astype(object)\n tm.assert_series_equal(res, expec)\n\n # Restore the former encoding\n if former_encoding is not None and former_encoding != "utf-8":\n reload(sys)\n sys.setdefaultencoding(former_encoding)\n\n def test_astype_bytes(self):\n # GH#39474\n result = Series(["foo", "bar", "baz"]).astype(bytes)\n assert result.dtypes == np.dtype("S3")\n\n def test_astype_nan_to_bool(self):\n # GH#43018\n ser = Series(np.nan, dtype="object")\n result = ser.astype("bool")\n expected = Series(True, dtype="bool")\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "dtype",\n tm.ALL_INT_EA_DTYPES + tm.FLOAT_EA_DTYPES,\n )\n def test_astype_ea_to_datetimetzdtype(self, dtype):\n # GH37553\n ser = Series([4, 0, 9], dtype=dtype)\n result = ser.astype(DatetimeTZDtype(tz="US/Pacific"))\n\n expected = Series(\n {\n 0: Timestamp("1969-12-31 16:00:00.000000004-08:00", tz="US/Pacific"),\n 1: Timestamp("1969-12-31 16:00:00.000000000-08:00", tz="US/Pacific"),\n 2: Timestamp("1969-12-31 16:00:00.000000009-08:00", tz="US/Pacific"),\n }\n )\n\n tm.assert_series_equal(result, expected)\n\n def test_astype_retain_attrs(self, any_numpy_dtype):\n # GH#44414\n ser = Series([0, 1, 2, 3])\n ser.attrs["Location"] = "Michigan"\n\n result = ser.astype(any_numpy_dtype).attrs\n expected = ser.attrs\n\n tm.assert_dict_equal(expected, result)\n\n\nclass TestAstypeString:\n @pytest.mark.parametrize(\n "data, dtype",\n [\n ([True, NA], "boolean"),\n (["A", NA], "category"),\n 
(["2020-10-10", "2020-10-10"], "datetime64[ns]"),\n (["2020-10-10", "2020-10-10", NaT], "datetime64[ns]"),\n (\n ["2012-01-01 00:00:00-05:00", NaT],\n "datetime64[ns, US/Eastern]",\n ),\n ([1, None], "UInt16"),\n (["1/1/2021", "2/1/2021"], "period[M]"),\n (["1/1/2021", "2/1/2021", NaT], "period[M]"),\n (["1 Day", "59 Days", NaT], "timedelta64[ns]"),\n # currently no way to parse IntervalArray from a list of strings\n ],\n )\n def test_astype_string_to_extension_dtype_roundtrip(\n self, data, dtype, request, nullable_string_dtype\n ):\n if dtype == "boolean":\n mark = pytest.mark.xfail(\n reason="TODO StringArray.astype() with missing values #GH40566"\n )\n request.applymarker(mark)\n # GH-40351\n ser = Series(data, dtype=dtype)\n\n # Note: just passing .astype(dtype) fails for dtype="category"\n # with bc ser.dtype.categories will be object dtype whereas\n # result.dtype.categories will have string dtype\n result = ser.astype(nullable_string_dtype).astype(ser.dtype)\n tm.assert_series_equal(result, ser)\n\n\nclass TestAstypeCategorical:\n def test_astype_categorical_to_other(self):\n cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])\n ser = Series(np.random.default_rng(2).integers(0, 10000, 100)).sort_values()\n ser = cut(ser, range(0, 10500, 500), right=False, labels=cat)\n\n expected = ser\n tm.assert_series_equal(ser.astype("category"), expected)\n tm.assert_series_equal(ser.astype(CategoricalDtype()), expected)\n msg = r"Cannot cast object|str dtype to float64"\n with pytest.raises(ValueError, match=msg):\n ser.astype("float64")\n\n cat = Series(Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]))\n exp = Series(["a", "b", "b", "a", "a", "c", "c", "c"], dtype="str")\n tm.assert_series_equal(cat.astype("str"), exp)\n s2 = Series(Categorical(["1", "2", "3", "4"]))\n exp2 = Series([1, 2, 3, 4]).astype("int")\n tm.assert_series_equal(s2.astype("int"), exp2)\n\n # object don't sort correctly, so just compare that we have the same\n # values\n 
def cmp(a, b):\n tm.assert_almost_equal(np.sort(np.unique(a)), np.sort(np.unique(b)))\n\n expected = Series(np.array(ser.values), name="value_group")\n cmp(ser.astype("object"), expected)\n cmp(ser.astype(np.object_), expected)\n\n # array conversion\n tm.assert_almost_equal(np.array(ser), np.array(ser.values))\n\n tm.assert_series_equal(ser.astype("category"), ser)\n tm.assert_series_equal(ser.astype(CategoricalDtype()), ser)\n\n roundtrip_expected = ser.cat.set_categories(\n ser.cat.categories.sort_values()\n ).cat.remove_unused_categories()\n result = ser.astype("object").astype("category")\n tm.assert_series_equal(result, roundtrip_expected)\n result = ser.astype("object").astype(CategoricalDtype())\n tm.assert_series_equal(result, roundtrip_expected)\n\n def test_astype_categorical_invalid_conversions(self):\n # invalid conversion (these are NOT a dtype)\n cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])\n ser = Series(np.random.default_rng(2).integers(0, 10000, 100)).sort_values()\n ser = cut(ser, range(0, 10500, 500), right=False, labels=cat)\n\n msg = (\n "dtype '<class 'pandas.core.arrays.categorical.Categorical'>' "\n "not understood"\n )\n with pytest.raises(TypeError, match=msg):\n ser.astype(Categorical)\n with pytest.raises(TypeError, match=msg):\n ser.astype("object").astype(Categorical)\n\n def test_astype_categoricaldtype(self):\n ser = Series(["a", "b", "a"])\n result = ser.astype(CategoricalDtype(["a", "b"], ordered=True))\n expected = Series(Categorical(["a", "b", "a"], ordered=True))\n tm.assert_series_equal(result, expected)\n\n result = ser.astype(CategoricalDtype(["a", "b"], ordered=False))\n expected = Series(Categorical(["a", "b", "a"], ordered=False))\n tm.assert_series_equal(result, expected)\n\n result = ser.astype(CategoricalDtype(["a", "b", "c"], ordered=False))\n expected = Series(\n Categorical(["a", "b", "a"], categories=["a", "b", "c"], ordered=False)\n )\n tm.assert_series_equal(result, expected)\n 
tm.assert_index_equal(result.cat.categories, Index(["a", "b", "c"]))\n\n @pytest.mark.parametrize("name", [None, "foo"])\n @pytest.mark.parametrize("dtype_ordered", [True, False])\n @pytest.mark.parametrize("series_ordered", [True, False])\n def test_astype_categorical_to_categorical(\n self, name, dtype_ordered, series_ordered\n ):\n # GH#10696, GH#18593\n s_data = list("abcaacbab")\n s_dtype = CategoricalDtype(list("bac"), ordered=series_ordered)\n ser = Series(s_data, dtype=s_dtype, name=name)\n\n # unspecified categories\n dtype = CategoricalDtype(ordered=dtype_ordered)\n result = ser.astype(dtype)\n exp_dtype = CategoricalDtype(s_dtype.categories, dtype_ordered)\n expected = Series(s_data, name=name, dtype=exp_dtype)\n tm.assert_series_equal(result, expected)\n\n # different categories\n dtype = CategoricalDtype(list("adc"), dtype_ordered)\n result = ser.astype(dtype)\n expected = Series(s_data, name=name, dtype=dtype)\n tm.assert_series_equal(result, expected)\n\n if dtype_ordered is False:\n # not specifying ordered, so only test once\n expected = ser\n result = ser.astype("category")\n tm.assert_series_equal(result, expected)\n\n def test_astype_bool_missing_to_categorical(self):\n # GH-19182\n ser = Series([True, False, np.nan])\n assert ser.dtypes == np.object_\n\n result = ser.astype(CategoricalDtype(categories=[True, False]))\n expected = Series(Categorical([True, False, np.nan], categories=[True, False]))\n tm.assert_series_equal(result, expected)\n\n def test_astype_categories_raises(self):\n # deprecated GH#17636, removed in GH#27141\n ser = Series(["a", "b", "a"])\n with pytest.raises(TypeError, match="got an unexpected"):\n ser.astype("category", categories=["a", "b"], ordered=True)\n\n @pytest.mark.parametrize("items", [["a", "b", "c", "a"], [1, 2, 3, 1]])\n def test_astype_from_categorical(self, items):\n ser = Series(items)\n exp = Series(Categorical(items))\n res = ser.astype("category")\n tm.assert_series_equal(res, exp)\n\n def 
test_astype_from_categorical_with_keywords(self):\n # with keywords\n lst = ["a", "b", "c", "a"]\n ser = Series(lst)\n exp = Series(Categorical(lst, ordered=True))\n res = ser.astype(CategoricalDtype(None, ordered=True))\n tm.assert_series_equal(res, exp)\n\n exp = Series(Categorical(lst, categories=list("abcdef"), ordered=True))\n res = ser.astype(CategoricalDtype(list("abcdef"), ordered=True))\n tm.assert_series_equal(res, exp)\n\n def test_astype_timedelta64_with_np_nan(self):\n # GH45798\n result = Series([Timedelta(1), np.nan], dtype="timedelta64[ns]")\n expected = Series([Timedelta(1), NaT], dtype="timedelta64[ns]")\n tm.assert_series_equal(result, expected)\n\n @td.skip_if_no("pyarrow")\n def test_astype_int_na_string(self):\n # GH#57418\n ser = Series([12, NA], dtype="Int64[pyarrow]")\n result = ser.astype("string[pyarrow]")\n expected = Series(["12", NA], dtype="string[pyarrow]")\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_astype.py | test_astype.py | Python | 25,745 | 0.95 | 0.11611 | 0.104167 | react-lib | 441 | 2025-05-20T07:04:40.359776 | MIT | true | 331a1bb72d0ea9e5a02b618ee21821e4 |
import numpy as np\n\n\nclass TestAutoCorr:\n def test_autocorr(self, datetime_series):\n # Just run the function\n corr1 = datetime_series.autocorr()\n\n # Now run it with the lag parameter\n corr2 = datetime_series.autocorr(lag=1)\n\n # corr() with lag needs Series of at least length 2\n if len(datetime_series) <= 2:\n assert np.isnan(corr1)\n assert np.isnan(corr2)\n else:\n assert corr1 == corr2\n\n # Choose a random lag between 1 and length of Series - 2\n # and compare the result with the Series corr() function\n n = 1 + np.random.default_rng(2).integers(max(1, len(datetime_series) - 2))\n corr1 = datetime_series.corr(datetime_series.shift(n))\n corr2 = datetime_series.autocorr(lag=n)\n\n # corr() with lag needs Series of at least length 2\n if len(datetime_series) <= 2:\n assert np.isnan(corr1)\n assert np.isnan(corr2)\n else:\n assert corr1 == corr2\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_autocorr.py | test_autocorr.py | Python | 1,015 | 0.95 | 0.2 | 0.25 | node-utils | 339 | 2025-04-27T16:32:46.423937 | MIT | true | 8997bc8a02f876d066a99956a8f196bd |
import numpy as np\nimport pytest\n\nfrom pandas import (\n Series,\n bdate_range,\n date_range,\n period_range,\n)\nimport pandas._testing as tm\n\n\nclass TestBetween:\n def test_between(self):\n series = Series(date_range("1/1/2000", periods=10))\n left, right = series[[2, 7]]\n\n result = series.between(left, right)\n expected = (series >= left) & (series <= right)\n tm.assert_series_equal(result, expected)\n\n def test_between_datetime_object_dtype(self):\n ser = Series(bdate_range("1/1/2000", periods=20), dtype=object)\n ser[::2] = np.nan\n\n result = ser[ser.between(ser[3], ser[17])]\n expected = ser[3:18].dropna()\n tm.assert_series_equal(result, expected)\n\n result = ser[ser.between(ser[3], ser[17], inclusive="neither")]\n expected = ser[5:16].dropna()\n tm.assert_series_equal(result, expected)\n\n def test_between_period_values(self):\n ser = Series(period_range("2000-01-01", periods=10, freq="D"))\n left, right = ser[[2, 7]]\n result = ser.between(left, right)\n expected = (ser >= left) & (ser <= right)\n tm.assert_series_equal(result, expected)\n\n def test_between_inclusive_string(self):\n # GH 40628\n series = Series(date_range("1/1/2000", periods=10))\n left, right = series[[2, 7]]\n\n result = series.between(left, right, inclusive="both")\n expected = (series >= left) & (series <= right)\n tm.assert_series_equal(result, expected)\n\n result = series.between(left, right, inclusive="left")\n expected = (series >= left) & (series < right)\n tm.assert_series_equal(result, expected)\n\n result = series.between(left, right, inclusive="right")\n expected = (series > left) & (series <= right)\n tm.assert_series_equal(result, expected)\n\n result = series.between(left, right, inclusive="neither")\n expected = (series > left) & (series < right)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("inclusive", ["yes", True, False])\n def test_between_error_args(self, inclusive):\n # GH 40628\n series = Series(date_range("1/1/2000", 
periods=10))\n left, right = series[[2, 7]]\n\n value_error_msg = (\n "Inclusive has to be either string of 'both',"\n "'left', 'right', or 'neither'."\n )\n\n with pytest.raises(ValueError, match=value_error_msg):\n series = Series(date_range("1/1/2000", periods=10))\n series.between(left, right, inclusive=inclusive)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_between.py | test_between.py | Python | 2,584 | 0.95 | 0.08 | 0.033898 | python-kit | 139 | 2025-06-19T12:23:32.451987 | Apache-2.0 | true | bcb547c02617e603e52d097545c5fc87 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n Series,\n array as pd_array,\n date_range,\n)\nimport pandas._testing as tm\n\n\n@pytest.fixture\ndef df():\n """\n base dataframe for testing\n """\n return DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n\n\ndef test_case_when_caselist_is_not_a_list(df):\n """\n Raise ValueError if caselist is not a list.\n """\n msg = "The caselist argument should be a list; "\n msg += "instead got.+"\n with pytest.raises(TypeError, match=msg): # GH39154\n df["a"].case_when(caselist=())\n\n\ndef test_case_when_no_caselist(df):\n """\n Raise ValueError if no caselist is provided.\n """\n msg = "provide at least one boolean condition, "\n msg += "with a corresponding replacement."\n with pytest.raises(ValueError, match=msg): # GH39154\n df["a"].case_when([])\n\n\ndef test_case_when_odd_caselist(df):\n """\n Raise ValueError if no of caselist is odd.\n """\n msg = "Argument 0 must have length 2; "\n msg += "a condition and replacement; instead got length 3."\n\n with pytest.raises(ValueError, match=msg):\n df["a"].case_when([(df["a"].eq(1), 1, df.a.gt(1))])\n\n\ndef test_case_when_raise_error_from_mask(df):\n """\n Raise Error from within Series.mask\n """\n msg = "Failed to apply condition0 and replacement0."\n with pytest.raises(ValueError, match=msg):\n df["a"].case_when([(df["a"].eq(1), [1, 2])])\n\n\ndef test_case_when_single_condition(df):\n """\n Test output on a single condition.\n """\n result = Series([np.nan, np.nan, np.nan]).case_when([(df.a.eq(1), 1)])\n expected = Series([1, np.nan, np.nan])\n tm.assert_series_equal(result, expected)\n\n\ndef test_case_when_multiple_conditions(df):\n """\n Test output when booleans are derived from a computation\n """\n result = Series([np.nan, np.nan, np.nan]).case_when(\n [(df.a.eq(1), 1), (Series([False, True, False]), 2)]\n )\n expected = Series([1, 2, np.nan])\n tm.assert_series_equal(result, expected)\n\n\ndef 
test_case_when_multiple_conditions_replacement_list(df):\n """\n Test output when replacement is a list\n """\n result = Series([np.nan, np.nan, np.nan]).case_when(\n [([True, False, False], 1), (df["a"].gt(1) & df["b"].eq(5), [1, 2, 3])]\n )\n expected = Series([1, 2, np.nan])\n tm.assert_series_equal(result, expected)\n\n\ndef test_case_when_multiple_conditions_replacement_extension_dtype(df):\n """\n Test output when replacement has an extension dtype\n """\n result = Series([np.nan, np.nan, np.nan]).case_when(\n [\n ([True, False, False], 1),\n (df["a"].gt(1) & df["b"].eq(5), pd_array([1, 2, 3], dtype="Int64")),\n ],\n )\n expected = Series([1, 2, np.nan], dtype="Float64")\n tm.assert_series_equal(result, expected)\n\n\ndef test_case_when_multiple_conditions_replacement_series(df):\n """\n Test output when replacement is a Series\n """\n result = Series([np.nan, np.nan, np.nan]).case_when(\n [\n (np.array([True, False, False]), 1),\n (df["a"].gt(1) & df["b"].eq(5), Series([1, 2, 3])),\n ],\n )\n expected = Series([1, 2, np.nan])\n tm.assert_series_equal(result, expected)\n\n\ndef test_case_when_non_range_index():\n """\n Test output if index is not RangeIndex\n """\n rng = np.random.default_rng(seed=123)\n dates = date_range("1/1/2000", periods=8)\n df = DataFrame(\n rng.standard_normal(size=(8, 4)), index=dates, columns=["A", "B", "C", "D"]\n )\n result = Series(5, index=df.index, name="A").case_when([(df.A.gt(0), df.B)])\n expected = df.A.mask(df.A.gt(0), df.B).where(df.A.gt(0), 5)\n tm.assert_series_equal(result, expected)\n\n\ndef test_case_when_callable():\n """\n Test output on a callable\n """\n # https://numpy.org/doc/stable/reference/generated/numpy.piecewise.html\n x = np.linspace(-2.5, 2.5, 6)\n ser = Series(x)\n result = ser.case_when(\n caselist=[\n (lambda df: df < 0, lambda df: -df),\n (lambda df: df >= 0, lambda df: df),\n ]\n )\n expected = np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])\n tm.assert_series_equal(result, 
Series(expected))\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_case_when.py | test_case_when.py | Python | 4,223 | 0.95 | 0.114865 | 0.008197 | awesome-app | 340 | 2023-08-13T17:05:39.512431 | GPL-3.0 | true | 2981c562f9ec8d62a562f52b23979328 |
from datetime import datetime\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n Series,\n Timestamp,\n isna,\n notna,\n)\nimport pandas._testing as tm\n\n\nclass TestSeriesClip:\n def test_clip(self, datetime_series):\n val = datetime_series.median()\n\n assert datetime_series.clip(lower=val).min() == val\n assert datetime_series.clip(upper=val).max() == val\n\n result = datetime_series.clip(-0.5, 0.5)\n expected = np.clip(datetime_series, -0.5, 0.5)\n tm.assert_series_equal(result, expected)\n assert isinstance(expected, Series)\n\n def test_clip_types_and_nulls(self):\n sers = [\n Series([np.nan, 1.0, 2.0, 3.0]),\n Series([None, "a", "b", "c"]),\n Series(pd.to_datetime([np.nan, 1, 2, 3], unit="D")),\n ]\n\n for s in sers:\n thresh = s[2]\n lower = s.clip(lower=thresh)\n upper = s.clip(upper=thresh)\n assert lower[notna(lower)].min() == thresh\n assert upper[notna(upper)].max() == thresh\n assert list(isna(s)) == list(isna(lower))\n assert list(isna(s)) == list(isna(upper))\n\n def test_series_clipping_with_na_values(self, any_numeric_ea_dtype, nulls_fixture):\n # Ensure that clipping method can handle NA values with out failing\n # GH#40581\n\n if nulls_fixture is pd.NaT:\n # constructor will raise, see\n # test_constructor_mismatched_null_nullable_dtype\n pytest.skip("See test_constructor_mismatched_null_nullable_dtype")\n\n ser = Series([nulls_fixture, 1.0, 3.0], dtype=any_numeric_ea_dtype)\n s_clipped_upper = ser.clip(upper=2.0)\n s_clipped_lower = ser.clip(lower=2.0)\n\n expected_upper = Series([nulls_fixture, 1.0, 2.0], dtype=any_numeric_ea_dtype)\n expected_lower = Series([nulls_fixture, 2.0, 3.0], dtype=any_numeric_ea_dtype)\n\n tm.assert_series_equal(s_clipped_upper, expected_upper)\n tm.assert_series_equal(s_clipped_lower, expected_lower)\n\n def test_clip_with_na_args(self):\n """Should process np.nan argument as None"""\n # GH#17276\n s = Series([1, 2, 3])\n\n tm.assert_series_equal(s.clip(np.nan), Series([1, 2, 3]))\n 
tm.assert_series_equal(s.clip(upper=np.nan, lower=np.nan), Series([1, 2, 3]))\n\n # GH#19992\n msg = "Downcasting behavior in Series and DataFrame methods 'where'"\n # TODO: avoid this warning here? seems like we should never be upcasting\n # in the first place?\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = s.clip(lower=[0, 4, np.nan])\n tm.assert_series_equal(res, Series([1, 4, 3]))\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = s.clip(upper=[1, np.nan, 1])\n tm.assert_series_equal(res, Series([1, 2, 1]))\n\n # GH#40420\n s = Series([1, 2, 3])\n result = s.clip(0, [np.nan, np.nan, np.nan])\n tm.assert_series_equal(s, result)\n\n def test_clip_against_series(self):\n # GH#6966\n\n s = Series([1.0, 1.0, 4.0])\n\n lower = Series([1.0, 2.0, 3.0])\n upper = Series([1.5, 2.5, 3.5])\n\n tm.assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5]))\n tm.assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5]))\n\n @pytest.mark.parametrize("inplace", [True, False])\n @pytest.mark.parametrize("upper", [[1, 2, 3], np.asarray([1, 2, 3])])\n def test_clip_against_list_like(self, inplace, upper):\n # GH#15390\n original = Series([5, 6, 7])\n result = original.clip(upper=upper, inplace=inplace)\n expected = Series([1, 2, 3])\n\n if inplace:\n result = original\n tm.assert_series_equal(result, expected, check_exact=True)\n\n def test_clip_with_datetimes(self):\n # GH#11838\n # naive and tz-aware datetimes\n\n t = Timestamp("2015-12-01 09:30:30")\n s = Series([Timestamp("2015-12-01 09:30:00"), Timestamp("2015-12-01 09:31:00")])\n result = s.clip(upper=t)\n expected = Series(\n [Timestamp("2015-12-01 09:30:00"), Timestamp("2015-12-01 09:30:30")]\n )\n tm.assert_series_equal(result, expected)\n\n t = Timestamp("2015-12-01 09:30:30", tz="US/Eastern")\n s = Series(\n [\n Timestamp("2015-12-01 09:30:00", tz="US/Eastern"),\n Timestamp("2015-12-01 09:31:00", tz="US/Eastern"),\n ]\n )\n result = s.clip(upper=t)\n expected = 
Series(\n [\n Timestamp("2015-12-01 09:30:00", tz="US/Eastern"),\n Timestamp("2015-12-01 09:30:30", tz="US/Eastern"),\n ]\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("dtype", [object, "M8[us]"])\n def test_clip_with_timestamps_and_oob_datetimes(self, dtype):\n # GH-42794\n ser = Series([datetime(1, 1, 1), datetime(9999, 9, 9)], dtype=dtype)\n\n result = ser.clip(lower=Timestamp.min, upper=Timestamp.max)\n expected = Series([Timestamp.min, Timestamp.max], dtype=dtype)\n\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_clip.py | test_clip.py | Python | 5,220 | 0.95 | 0.082192 | 0.119658 | node-utils | 621 | 2025-03-13T21:29:21.584340 | Apache-2.0 | true | 347561e6090a576bbc1610a30a5f171d |
from pandas import Series\nimport pandas._testing as tm\n\n\nclass TestCombine:\n def test_combine_scalar(self):\n # GH#21248\n # Note - combine() with another Series is tested elsewhere because\n # it is used when testing operators\n ser = Series([i * 10 for i in range(5)])\n result = ser.combine(3, lambda x, y: x + y)\n expected = Series([i * 10 + 3 for i in range(5)])\n tm.assert_series_equal(result, expected)\n\n result = ser.combine(22, lambda x, y: min(x, y))\n expected = Series([min(i * 10, 22) for i in range(5)])\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_combine.py | test_combine.py | Python | 627 | 0.95 | 0.294118 | 0.214286 | python-kit | 71 | 2024-05-15T18:24:49.204493 | GPL-3.0 | true | 01d0a8a27a8fcdd13fff091b52207d94 |
from datetime import datetime\n\nimport numpy as np\n\nimport pandas as pd\nfrom pandas import (\n Period,\n Series,\n date_range,\n period_range,\n to_datetime,\n)\nimport pandas._testing as tm\n\n\nclass TestCombineFirst:\n def test_combine_first_period_datetime(self):\n # GH#3367\n didx = date_range(start="1950-01-31", end="1950-07-31", freq="ME")\n pidx = period_range(start=Period("1950-1"), end=Period("1950-7"), freq="M")\n # check to be consistent with DatetimeIndex\n for idx in [didx, pidx]:\n a = Series([1, np.nan, np.nan, 4, 5, np.nan, 7], index=idx)\n b = Series([9, 9, 9, 9, 9, 9, 9], index=idx)\n\n result = a.combine_first(b)\n expected = Series([1, 9, 9, 4, 5, 9, 7], index=idx, dtype=np.float64)\n tm.assert_series_equal(result, expected)\n\n def test_combine_first_name(self, datetime_series):\n result = datetime_series.combine_first(datetime_series[:5])\n assert result.name == datetime_series.name\n\n def test_combine_first(self):\n values = np.arange(20, dtype=np.float64)\n series = Series(values, index=np.arange(20, dtype=np.int64))\n\n series_copy = series * 2\n series_copy[::2] = np.nan\n\n # nothing used from the input\n combined = series.combine_first(series_copy)\n\n tm.assert_series_equal(combined, series)\n\n # Holes filled from input\n combined = series_copy.combine_first(series)\n assert np.isfinite(combined).all()\n\n tm.assert_series_equal(combined[::2], series[::2])\n tm.assert_series_equal(combined[1::2], series_copy[1::2])\n\n # mixed types\n index = pd.Index([str(i) for i in range(20)])\n floats = Series(np.random.default_rng(2).standard_normal(20), index=index)\n strings = Series([str(i) for i in range(10)], index=index[::2], dtype=object)\n\n combined = strings.combine_first(floats)\n\n tm.assert_series_equal(strings, combined.loc[index[::2]])\n tm.assert_series_equal(floats[1::2].astype(object), combined.loc[index[1::2]])\n\n # corner case\n ser = Series([1.0, 2, 3], index=[0, 1, 2])\n empty = Series([], index=[], dtype=object)\n msg 
= "The behavior of array concatenation with empty entries is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = ser.combine_first(empty)\n ser.index = ser.index.astype("O")\n tm.assert_series_equal(ser, result)\n\n def test_combine_first_dt64(self, unit):\n s0 = to_datetime(Series(["2010", np.nan])).dt.as_unit(unit)\n s1 = to_datetime(Series([np.nan, "2011"])).dt.as_unit(unit)\n rs = s0.combine_first(s1)\n xp = to_datetime(Series(["2010", "2011"])).dt.as_unit(unit)\n tm.assert_series_equal(rs, xp)\n\n s0 = to_datetime(Series(["2010", np.nan])).dt.as_unit(unit)\n s1 = Series([np.nan, "2011"])\n rs = s0.combine_first(s1)\n\n xp = Series([datetime(2010, 1, 1), "2011"], dtype="datetime64[ns]")\n\n tm.assert_series_equal(rs, xp)\n\n def test_combine_first_dt_tz_values(self, tz_naive_fixture):\n ser1 = Series(\n pd.DatetimeIndex(["20150101", "20150102", "20150103"], tz=tz_naive_fixture),\n name="ser1",\n )\n ser2 = Series(\n pd.DatetimeIndex(["20160514", "20160515", "20160516"], tz=tz_naive_fixture),\n index=[2, 3, 4],\n name="ser2",\n )\n result = ser1.combine_first(ser2)\n exp_vals = pd.DatetimeIndex(\n ["20150101", "20150102", "20150103", "20160515", "20160516"],\n tz=tz_naive_fixture,\n )\n exp = Series(exp_vals, name="ser1")\n tm.assert_series_equal(exp, result)\n\n def test_combine_first_timezone_series_with_empty_series(self):\n # GH 41800\n time_index = date_range(\n datetime(2021, 1, 1, 1),\n datetime(2021, 1, 1, 10),\n freq="h",\n tz="Europe/Rome",\n )\n s1 = Series(range(10), index=time_index)\n s2 = Series(index=time_index)\n msg = "The behavior of array concatenation with empty entries is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = s1.combine_first(s2)\n tm.assert_series_equal(result, s1)\n\n def test_combine_first_preserves_dtype(self):\n # GH51764\n s1 = Series([1666880195890293744, 1666880195890293837])\n s2 = Series([1, 2, 3])\n result = s1.combine_first(s2)\n expected = 
Series([1666880195890293744, 1666880195890293837, 3])\n tm.assert_series_equal(result, expected)\n\n def test_combine_mixed_timezone(self):\n # GH 26283\n uniform_tz = Series({pd.Timestamp("2019-05-01", tz="UTC"): 1.0})\n multi_tz = Series(\n {\n pd.Timestamp("2019-05-01 01:00:00+0100", tz="Europe/London"): 2.0,\n pd.Timestamp("2019-05-02", tz="UTC"): 3.0,\n }\n )\n\n result = uniform_tz.combine_first(multi_tz)\n expected = Series(\n [1.0, 3.0],\n index=pd.Index(\n [\n pd.Timestamp("2019-05-01 00:00:00+00:00", tz="UTC"),\n pd.Timestamp("2019-05-02 00:00:00+00:00", tz="UTC"),\n ],\n dtype="object",\n ),\n )\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_combine_first.py | test_combine_first.py | Python | 5,420 | 0.95 | 0.080537 | 0.072581 | vue-tools | 571 | 2025-05-21T11:36:14.816638 | MIT | true | 3949c0458592c85a4e7e1bdcd866cf40 |
import numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize("align_axis", [0, 1, "index", "columns"])\ndef test_compare_axis(align_axis):\n # GH#30429\n s1 = pd.Series(["a", "b", "c"])\n s2 = pd.Series(["x", "b", "z"])\n\n result = s1.compare(s2, align_axis=align_axis)\n\n if align_axis in (1, "columns"):\n indices = pd.Index([0, 2])\n columns = pd.Index(["self", "other"])\n expected = pd.DataFrame(\n [["a", "x"], ["c", "z"]], index=indices, columns=columns\n )\n tm.assert_frame_equal(result, expected)\n else:\n indices = pd.MultiIndex.from_product([[0, 2], ["self", "other"]])\n expected = pd.Series(["a", "x", "c", "z"], index=indices)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "keep_shape, keep_equal",\n [\n (True, False),\n (False, True),\n (True, True),\n # False, False case is already covered in test_compare_axis\n ],\n)\ndef test_compare_various_formats(keep_shape, keep_equal):\n s1 = pd.Series(["a", "b", "c"])\n s2 = pd.Series(["x", "b", "z"])\n\n result = s1.compare(s2, keep_shape=keep_shape, keep_equal=keep_equal)\n\n if keep_shape:\n indices = pd.Index([0, 1, 2])\n columns = pd.Index(["self", "other"])\n if keep_equal:\n expected = pd.DataFrame(\n [["a", "x"], ["b", "b"], ["c", "z"]], index=indices, columns=columns\n )\n else:\n expected = pd.DataFrame(\n [["a", "x"], [np.nan, np.nan], ["c", "z"]],\n index=indices,\n columns=columns,\n )\n else:\n indices = pd.Index([0, 2])\n columns = pd.Index(["self", "other"])\n expected = pd.DataFrame(\n [["a", "x"], ["c", "z"]], index=indices, columns=columns\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_compare_with_equal_nulls():\n # We want to make sure two NaNs are considered the same\n # and dropped where applicable\n s1 = pd.Series(["a", "b", np.nan])\n s2 = pd.Series(["x", "b", np.nan])\n\n result = s1.compare(s2)\n expected = pd.DataFrame([["a", "x"]], columns=["self", "other"])\n 
tm.assert_frame_equal(result, expected)\n\n\ndef test_compare_with_non_equal_nulls():\n # We want to make sure the relevant NaNs do not get dropped\n s1 = pd.Series(["a", "b", "c"])\n s2 = pd.Series(["x", "b", np.nan])\n\n result = s1.compare(s2, align_axis=0)\n\n indices = pd.MultiIndex.from_product([[0, 2], ["self", "other"]])\n expected = pd.Series(["a", "x", "c", np.nan], index=indices)\n tm.assert_series_equal(result, expected)\n\n\ndef test_compare_multi_index():\n index = pd.MultiIndex.from_arrays([[0, 0, 1], [0, 1, 2]])\n s1 = pd.Series(["a", "b", "c"], index=index)\n s2 = pd.Series(["x", "b", "z"], index=index)\n\n result = s1.compare(s2, align_axis=0)\n\n indices = pd.MultiIndex.from_arrays(\n [[0, 0, 1, 1], [0, 0, 2, 2], ["self", "other", "self", "other"]]\n )\n expected = pd.Series(["a", "x", "c", "z"], index=indices)\n tm.assert_series_equal(result, expected)\n\n\ndef test_compare_unaligned_objects():\n # test Series with different indices\n msg = "Can only compare identically-labeled Series objects"\n with pytest.raises(ValueError, match=msg):\n ser1 = pd.Series([1, 2, 3], index=["a", "b", "c"])\n ser2 = pd.Series([1, 2, 3], index=["a", "b", "d"])\n ser1.compare(ser2)\n\n # test Series with different lengths\n msg = "Can only compare identically-labeled Series objects"\n with pytest.raises(ValueError, match=msg):\n ser1 = pd.Series([1, 2, 3])\n ser2 = pd.Series([1, 2, 3, 4])\n ser1.compare(ser2)\n\n\ndef test_compare_datetime64_and_string():\n # Issue https://github.com/pandas-dev/pandas/issues/45506\n # Catch OverflowError when comparing datetime64 and string\n data = [\n {"a": "2015-07-01", "b": "08335394550"},\n {"a": "2015-07-02", "b": "+49 (0) 0345 300033"},\n {"a": "2015-07-03", "b": "+49(0)2598 04457"},\n {"a": "2015-07-04", "b": "0741470003"},\n {"a": "2015-07-05", "b": "04181 83668"},\n ]\n dtypes = {"a": "datetime64[ns]", "b": "string"}\n df = pd.DataFrame(data=data).astype(dtypes)\n\n result_eq1 = df["a"].eq(df["b"])\n result_eq2 = df["a"] 
== df["b"]\n result_neq = df["a"] != df["b"]\n\n expected_eq = pd.Series([False] * 5) # For .eq and ==\n expected_neq = pd.Series([True] * 5) # For !=\n\n tm.assert_series_equal(result_eq1, expected_eq)\n tm.assert_series_equal(result_eq2, expected_eq)\n tm.assert_series_equal(result_neq, expected_neq)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_compare.py | test_compare.py | Python | 4,658 | 0.95 | 0.070922 | 0.079646 | vue-tools | 91 | 2024-11-21T19:54:10.022387 | GPL-3.0 | true | db5d4e35a8262d810b3d9ad72593b718 |
from itertools import product\n\nimport numpy as np\nimport pytest\n\nfrom pandas._config import using_string_dtype\n\nfrom pandas._libs import lib\n\nimport pandas as pd\nimport pandas._testing as tm\n\n# Each test case consists of a tuple with the data and dtype to create the\n# test Series, the default dtype for the expected result (which is valid\n# for most cases), and the specific cases where the result deviates from\n# this default. Those overrides are defined as a dict with (keyword, val) as\n# dictionary key. In case of multiple items, the last override takes precedence.\n\n\n@pytest.fixture(\n params=[\n (\n # data\n [1, 2, 3],\n # original dtype\n np.dtype("int32"),\n # default expected dtype\n "Int32",\n # exceptions on expected dtype\n {("convert_integer", False): np.dtype("int32")},\n ),\n (\n [1, 2, 3],\n np.dtype("int64"),\n "Int64",\n {("convert_integer", False): np.dtype("int64")},\n ),\n (\n ["x", "y", "z"],\n np.dtype("O"),\n pd.StringDtype(),\n {("convert_string", False): np.dtype("O")},\n ),\n (\n [True, False, np.nan],\n np.dtype("O"),\n pd.BooleanDtype(),\n {("convert_boolean", False): np.dtype("O")},\n ),\n (\n ["h", "i", np.nan],\n np.dtype("O"),\n pd.StringDtype(),\n {("convert_string", False): np.dtype("O")},\n ),\n ( # GH32117\n ["h", "i", 1],\n np.dtype("O"),\n np.dtype("O"),\n {},\n ),\n (\n [10, np.nan, 20],\n np.dtype("float"),\n "Int64",\n {\n ("convert_integer", False, "convert_floating", True): "Float64",\n ("convert_integer", False, "convert_floating", False): np.dtype(\n "float"\n ),\n },\n ),\n (\n [np.nan, 100.5, 200],\n np.dtype("float"),\n "Float64",\n {("convert_floating", False): np.dtype("float")},\n ),\n (\n [3, 4, 5],\n "Int8",\n "Int8",\n {},\n ),\n (\n [[1, 2], [3, 4], [5]],\n None,\n np.dtype("O"),\n {},\n ),\n (\n [4, 5, 6],\n np.dtype("uint32"),\n "UInt32",\n {("convert_integer", False): np.dtype("uint32")},\n ),\n (\n [-10, 12, 13],\n np.dtype("i1"),\n "Int8",\n {("convert_integer", False): np.dtype("i1")},\n 
),\n (\n [1.2, 1.3],\n np.dtype("float32"),\n "Float32",\n {("convert_floating", False): np.dtype("float32")},\n ),\n (\n [1, 2.0],\n object,\n "Int64",\n {\n ("convert_integer", False): "Float64",\n ("convert_integer", False, "convert_floating", False): np.dtype(\n "float"\n ),\n ("infer_objects", False): np.dtype("object"),\n },\n ),\n (\n [1, 2.5],\n object,\n "Float64",\n {\n ("convert_floating", False): np.dtype("float"),\n ("infer_objects", False): np.dtype("object"),\n },\n ),\n (["a", "b"], pd.CategoricalDtype(), pd.CategoricalDtype(), {}),\n (\n pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]).as_unit("s"),\n pd.DatetimeTZDtype(tz="UTC"),\n pd.DatetimeTZDtype(tz="UTC"),\n {},\n ),\n (\n pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]).as_unit("ms"),\n pd.DatetimeTZDtype(tz="UTC"),\n pd.DatetimeTZDtype(tz="UTC"),\n {},\n ),\n (\n pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]).as_unit("us"),\n pd.DatetimeTZDtype(tz="UTC"),\n pd.DatetimeTZDtype(tz="UTC"),\n {},\n ),\n (\n pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]).as_unit("ns"),\n pd.DatetimeTZDtype(tz="UTC"),\n pd.DatetimeTZDtype(tz="UTC"),\n {},\n ),\n (\n pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]).as_unit("ns"),\n "datetime64[ns]",\n np.dtype("datetime64[ns]"),\n {},\n ),\n (\n pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]).as_unit("ns"),\n object,\n np.dtype("datetime64[ns]"),\n {("infer_objects", False): np.dtype("object")},\n ),\n (\n pd.period_range("1/1/2011", freq="M", periods=3),\n None,\n pd.PeriodDtype("M"),\n {},\n ),\n (\n pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]),\n None,\n pd.IntervalDtype("int64", "right"),\n {},\n ),\n ]\n)\ndef test_cases(request):\n return request.param\n\n\nclass TestSeriesConvertDtypes:\n @pytest.mark.xfail(using_string_dtype(), reason="TODO(infer_string)", strict=False)\n @pytest.mark.parametrize("params", product(*[(True, False)] * 5))\n def test_convert_dtypes(\n self,\n 
test_cases,\n params,\n using_infer_string,\n ):\n data, maindtype, expected_default, expected_other = test_cases\n if (\n hasattr(data, "dtype")\n and lib.is_np_dtype(data.dtype, "M")\n and isinstance(maindtype, pd.DatetimeTZDtype)\n ):\n # this astype is deprecated in favor of tz_localize\n msg = "Cannot use .astype to convert from timezone-naive dtype"\n with pytest.raises(TypeError, match=msg):\n pd.Series(data, dtype=maindtype)\n return\n\n if maindtype is not None:\n series = pd.Series(data, dtype=maindtype)\n else:\n series = pd.Series(data)\n\n result = series.convert_dtypes(*params)\n\n param_names = [\n "infer_objects",\n "convert_string",\n "convert_integer",\n "convert_boolean",\n "convert_floating",\n ]\n params_dict = dict(zip(param_names, params))\n\n expected_dtype = expected_default\n for spec, dtype in expected_other.items():\n if all(params_dict[key] is val for key, val in zip(spec[::2], spec[1::2])):\n expected_dtype = dtype\n if (\n using_infer_string\n and expected_default == "string"\n and expected_dtype == object\n and params[0]\n and not params[1]\n ):\n # If convert_string=False and infer_objects=True, we end up with the\n # default string dtype instead of preserving object for string data\n expected_dtype = pd.StringDtype(na_value=np.nan)\n\n expected = pd.Series(data, dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n # Test that it is a copy\n copy = series.copy(deep=True)\n\n if result.notna().sum() > 0 and result.dtype in ["interval[int64, right]"]:\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n result[result.notna()] = np.nan\n else:\n result[result.notna()] = np.nan\n\n # Make sure original not changed\n tm.assert_series_equal(series, copy)\n\n def test_convert_string_dtype(self, nullable_string_dtype):\n # https://github.com/pandas-dev/pandas/issues/31731 -> converting columns\n # that are already string dtype\n df = pd.DataFrame(\n {"A": ["a", "b", pd.NA], "B": ["ä", "ö", "ü"]}, 
dtype=nullable_string_dtype\n )\n result = df.convert_dtypes()\n tm.assert_frame_equal(df, result)\n\n def test_convert_bool_dtype(self):\n # GH32287\n df = pd.DataFrame({"A": pd.array([True])})\n tm.assert_frame_equal(df, df.convert_dtypes())\n\n def test_convert_byte_string_dtype(self):\n # GH-43183\n byte_str = b"binary-string"\n\n df = pd.DataFrame(data={"A": byte_str}, index=[0])\n result = df.convert_dtypes()\n expected = df\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n "infer_objects, dtype", [(True, "Int64"), (False, "object")]\n )\n def test_convert_dtype_object_with_na(self, infer_objects, dtype):\n # GH#48791\n ser = pd.Series([1, pd.NA])\n result = ser.convert_dtypes(infer_objects=infer_objects)\n expected = pd.Series([1, pd.NA], dtype=dtype)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "infer_objects, dtype", [(True, "Float64"), (False, "object")]\n )\n def test_convert_dtype_object_with_na_float(self, infer_objects, dtype):\n # GH#48791\n ser = pd.Series([1.5, pd.NA])\n result = ser.convert_dtypes(infer_objects=infer_objects)\n expected = pd.Series([1.5, pd.NA], dtype=dtype)\n tm.assert_series_equal(result, expected)\n\n def test_convert_dtypes_pyarrow_to_np_nullable(self):\n # GH 53648\n pytest.importorskip("pyarrow")\n ser = pd.Series(range(2), dtype="int32[pyarrow]")\n result = ser.convert_dtypes(dtype_backend="numpy_nullable")\n expected = pd.Series(range(2), dtype="Int32")\n tm.assert_series_equal(result, expected)\n\n def test_convert_dtypes_pyarrow_null(self):\n # GH#55346\n pa = pytest.importorskip("pyarrow")\n ser = pd.Series([None, None])\n result = ser.convert_dtypes(dtype_backend="pyarrow")\n expected = pd.Series([None, None], dtype=pd.ArrowDtype(pa.null()))\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_convert_dtypes.py | test_convert_dtypes.py | Python | 9,915 | 0.95 | 0.064725 | 0.077465 | python-kit | 834 | 
2025-05-31T05:18:17.697540 | Apache-2.0 | true | c1fddfa4caec3a6b0986deba6c72f653 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n Series,\n Timestamp,\n)\nimport pandas._testing as tm\n\n\nclass TestCopy:\n @pytest.mark.parametrize("deep", ["default", None, False, True])\n def test_copy(self, deep, using_copy_on_write, warn_copy_on_write):\n ser = Series(np.arange(10), dtype="float64")\n\n # default deep is True\n if deep == "default":\n ser2 = ser.copy()\n else:\n ser2 = ser.copy(deep=deep)\n\n if using_copy_on_write:\n # INFO(CoW) a shallow copy doesn't yet copy the data\n # but parent will not be modified (CoW)\n if deep is None or deep is False:\n assert np.may_share_memory(ser.values, ser2.values)\n else:\n assert not np.may_share_memory(ser.values, ser2.values)\n\n with tm.assert_cow_warning(warn_copy_on_write and deep is False):\n ser2[::2] = np.nan\n\n if deep is not False or using_copy_on_write:\n # Did not modify original Series\n assert np.isnan(ser2[0])\n assert not np.isnan(ser[0])\n else:\n # we DID modify the original Series\n assert np.isnan(ser2[0])\n assert np.isnan(ser[0])\n\n @pytest.mark.filterwarnings("ignore:Setting a value on a view:FutureWarning")\n @pytest.mark.parametrize("deep", ["default", None, False, True])\n def test_copy_tzaware(self, deep, using_copy_on_write):\n # GH#11794\n # copy of tz-aware\n expected = Series([Timestamp("2012/01/01", tz="UTC")])\n expected2 = Series([Timestamp("1999/01/01", tz="UTC")])\n\n ser = Series([Timestamp("2012/01/01", tz="UTC")])\n\n if deep == "default":\n ser2 = ser.copy()\n else:\n ser2 = ser.copy(deep=deep)\n\n if using_copy_on_write:\n # INFO(CoW) a shallow copy doesn't yet copy the data\n # but parent will not be modified (CoW)\n if deep is None or deep is False:\n assert np.may_share_memory(ser.values, ser2.values)\n else:\n assert not np.may_share_memory(ser.values, ser2.values)\n\n ser2[0] = Timestamp("1999/01/01", tz="UTC")\n\n # default deep is True\n if deep is not False or using_copy_on_write:\n # Did not modify original Series\n 
tm.assert_series_equal(ser2, expected2)\n tm.assert_series_equal(ser, expected)\n else:\n # we DID modify the original Series\n tm.assert_series_equal(ser2, expected2)\n tm.assert_series_equal(ser, expected2)\n\n def test_copy_name(self, datetime_series):\n result = datetime_series.copy()\n assert result.name == datetime_series.name\n\n def test_copy_index_name_checking(self, datetime_series):\n # don't want to be able to modify the index stored elsewhere after\n # making a copy\n\n datetime_series.index.name = None\n assert datetime_series.index.name is None\n assert datetime_series is datetime_series\n\n cp = datetime_series.copy()\n cp.index.name = "foo"\n assert datetime_series.index.name is None\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_copy.py | test_copy.py | Python | 3,164 | 0.95 | 0.142857 | 0.189189 | python-kit | 274 | 2024-04-03T15:25:59.994157 | Apache-2.0 | true | fce3b8ed576fdc4c0b0244d87786ed28 |
import numpy as np\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n Series,\n)\nimport pandas._testing as tm\n\n\nclass TestSeriesCount:\n def test_count(self, datetime_series):\n assert datetime_series.count() == len(datetime_series)\n\n datetime_series[::2] = np.nan\n\n assert datetime_series.count() == np.isfinite(datetime_series).sum()\n\n def test_count_inf_as_na(self):\n # GH#29478\n ser = Series([pd.Timestamp("1990/1/1")])\n msg = "use_inf_as_na option is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n with pd.option_context("use_inf_as_na", True):\n assert ser.count() == 1\n\n def test_count_categorical(self):\n ser = Series(\n Categorical(\n [np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True\n )\n )\n result = ser.count()\n assert result == 2\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_count.py | test_count.py | Python | 938 | 0.95 | 0.117647 | 0.037037 | python-kit | 833 | 2025-06-06T11:37:30.688415 | MIT | true | 6cf4db5ab053ce664f894b91a1dcfe33 |
import math\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n Series,\n date_range,\n isna,\n)\nimport pandas._testing as tm\n\n\nclass TestSeriesCov:\n def test_cov(self, datetime_series):\n # full overlap\n tm.assert_almost_equal(\n datetime_series.cov(datetime_series), datetime_series.std() ** 2\n )\n\n # partial overlap\n tm.assert_almost_equal(\n datetime_series[:15].cov(datetime_series[5:]),\n datetime_series[5:15].std() ** 2,\n )\n\n # No overlap\n assert np.isnan(datetime_series[::2].cov(datetime_series[1::2]))\n\n # all NA\n cp = datetime_series[:10].copy()\n cp[:] = np.nan\n assert isna(cp.cov(cp))\n\n # min_periods\n assert isna(datetime_series[:15].cov(datetime_series[5:], min_periods=12))\n\n ts1 = datetime_series[:15].reindex(datetime_series.index)\n ts2 = datetime_series[5:].reindex(datetime_series.index)\n assert isna(ts1.cov(ts2, min_periods=12))\n\n @pytest.mark.parametrize("test_ddof", [None, 0, 1, 2, 3])\n @pytest.mark.parametrize("dtype", ["float64", "Float64"])\n def test_cov_ddof(self, test_ddof, dtype):\n # GH#34611\n np_array1 = np.random.default_rng(2).random(10)\n np_array2 = np.random.default_rng(2).random(10)\n\n s1 = Series(np_array1, dtype=dtype)\n s2 = Series(np_array2, dtype=dtype)\n\n result = s1.cov(s2, ddof=test_ddof)\n expected = np.cov(np_array1, np_array2, ddof=test_ddof)[0][1]\n assert math.isclose(expected, result)\n\n\nclass TestSeriesCorr:\n @pytest.mark.parametrize("dtype", ["float64", "Float64"])\n def test_corr(self, datetime_series, dtype):\n stats = pytest.importorskip("scipy.stats")\n\n datetime_series = datetime_series.astype(dtype)\n\n # full overlap\n tm.assert_almost_equal(datetime_series.corr(datetime_series), 1)\n\n # partial overlap\n tm.assert_almost_equal(datetime_series[:15].corr(datetime_series[5:]), 1)\n\n assert isna(datetime_series[:15].corr(datetime_series[5:], min_periods=12))\n\n ts1 = datetime_series[:15].reindex(datetime_series.index)\n ts2 = 
datetime_series[5:].reindex(datetime_series.index)\n assert isna(ts1.corr(ts2, min_periods=12))\n\n # No overlap\n assert np.isnan(datetime_series[::2].corr(datetime_series[1::2]))\n\n # all NA\n cp = datetime_series[:10].copy()\n cp[:] = np.nan\n assert isna(cp.corr(cp))\n\n A = Series(\n np.arange(10, dtype=np.float64),\n index=date_range("2020-01-01", periods=10),\n name="ts",\n )\n B = A.copy()\n result = A.corr(B)\n expected, _ = stats.pearsonr(A, B)\n tm.assert_almost_equal(result, expected)\n\n def test_corr_rank(self):\n stats = pytest.importorskip("scipy.stats")\n\n # kendall and spearman\n A = Series(\n np.arange(10, dtype=np.float64),\n index=date_range("2020-01-01", periods=10),\n name="ts",\n )\n B = A.copy()\n A[-5:] = A[:5].copy()\n result = A.corr(B, method="kendall")\n expected = stats.kendalltau(A, B)[0]\n tm.assert_almost_equal(result, expected)\n\n result = A.corr(B, method="spearman")\n expected = stats.spearmanr(A, B)[0]\n tm.assert_almost_equal(result, expected)\n\n # results from R\n A = Series(\n [\n -0.89926396,\n 0.94209606,\n -1.03289164,\n -0.95445587,\n 0.76910310,\n -0.06430576,\n -2.09704447,\n 0.40660407,\n -0.89926396,\n 0.94209606,\n ]\n )\n B = Series(\n [\n -1.01270225,\n -0.62210117,\n -1.56895827,\n 0.59592943,\n -0.01680292,\n 1.17258718,\n -1.06009347,\n -0.10222060,\n -0.89076239,\n 0.89372375,\n ]\n )\n kexp = 0.4319297\n sexp = 0.5853767\n tm.assert_almost_equal(A.corr(B, method="kendall"), kexp)\n tm.assert_almost_equal(A.corr(B, method="spearman"), sexp)\n\n def test_corr_invalid_method(self):\n # GH PR #22298\n s1 = Series(np.random.default_rng(2).standard_normal(10))\n s2 = Series(np.random.default_rng(2).standard_normal(10))\n msg = "method must be either 'pearson', 'spearman', 'kendall', or a callable, "\n with pytest.raises(ValueError, match=msg):\n s1.corr(s2, method="____")\n\n def test_corr_callable_method(self, datetime_series):\n # simple correlation example\n # returns 1 if exact equality, 0 otherwise\n 
my_corr = lambda a, b: 1.0 if (a == b).all() else 0.0\n\n # simple example\n s1 = Series([1, 2, 3, 4, 5])\n s2 = Series([5, 4, 3, 2, 1])\n expected = 0\n tm.assert_almost_equal(s1.corr(s2, method=my_corr), expected)\n\n # full overlap\n tm.assert_almost_equal(\n datetime_series.corr(datetime_series, method=my_corr), 1.0\n )\n\n # partial overlap\n tm.assert_almost_equal(\n datetime_series[:15].corr(datetime_series[5:], method=my_corr), 1.0\n )\n\n # No overlap\n assert np.isnan(\n datetime_series[::2].corr(datetime_series[1::2], method=my_corr)\n )\n\n # dataframe example\n df = pd.DataFrame([s1, s2])\n expected = pd.DataFrame([{0: 1.0, 1: 0}, {0: 0, 1: 1.0}])\n tm.assert_almost_equal(df.transpose().corr(method=my_corr), expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_cov_corr.py | test_cov_corr.py | Python | 5,709 | 0.95 | 0.054054 | 0.131579 | vue-tools | 17 | 2024-01-20T10:26:51.560383 | GPL-3.0 | true | c19dbd3aba00f5766d036cfe3ab18751 |
import numpy as np\nimport pytest\n\nfrom pandas.compat.numpy import np_version_gte1p25\n\nfrom pandas.core.dtypes.common import (\n is_complex_dtype,\n is_extension_array_dtype,\n)\n\nfrom pandas import (\n NA,\n Period,\n Series,\n Timedelta,\n Timestamp,\n date_range,\n)\nimport pandas._testing as tm\n\n\nclass TestSeriesDescribe:\n def test_describe_ints(self):\n ser = Series([0, 1, 2, 3, 4], name="int_data")\n result = ser.describe()\n expected = Series(\n [5, 2, ser.std(), 0, 1, 2, 3, 4],\n name="int_data",\n index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],\n )\n tm.assert_series_equal(result, expected)\n\n def test_describe_bools(self):\n ser = Series([True, True, False, False, False], name="bool_data")\n result = ser.describe()\n expected = Series(\n [5, 2, False, 3], name="bool_data", index=["count", "unique", "top", "freq"]\n )\n tm.assert_series_equal(result, expected)\n\n def test_describe_strs(self):\n ser = Series(["a", "a", "b", "c", "d"], name="str_data")\n result = ser.describe()\n expected = Series(\n [5, 4, "a", 2], name="str_data", index=["count", "unique", "top", "freq"]\n )\n tm.assert_series_equal(result, expected)\n\n def test_describe_timedelta64(self):\n ser = Series(\n [\n Timedelta("1 days"),\n Timedelta("2 days"),\n Timedelta("3 days"),\n Timedelta("4 days"),\n Timedelta("5 days"),\n ],\n name="timedelta_data",\n )\n result = ser.describe()\n expected = Series(\n [5, ser[2], ser.std(), ser[0], ser[1], ser[2], ser[3], ser[4]],\n name="timedelta_data",\n index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],\n )\n tm.assert_series_equal(result, expected)\n\n def test_describe_period(self):\n ser = Series(\n [Period("2020-01", "M"), Period("2020-01", "M"), Period("2019-12", "M")],\n name="period_data",\n )\n result = ser.describe()\n expected = Series(\n [3, 2, ser[0], 2],\n name="period_data",\n index=["count", "unique", "top", "freq"],\n )\n tm.assert_series_equal(result, expected)\n\n def 
test_describe_empty_object(self):\n # https://github.com/pandas-dev/pandas/issues/27183\n s = Series([None, None], dtype=object)\n result = s.describe()\n expected = Series(\n [0, 0, np.nan, np.nan],\n dtype=object,\n index=["count", "unique", "top", "freq"],\n )\n tm.assert_series_equal(result, expected)\n\n result = s[:0].describe()\n tm.assert_series_equal(result, expected)\n # ensure NaN, not None\n assert np.isnan(result.iloc[2])\n assert np.isnan(result.iloc[3])\n\n def test_describe_with_tz(self, tz_naive_fixture):\n # GH 21332\n tz = tz_naive_fixture\n name = str(tz_naive_fixture)\n start = Timestamp(2018, 1, 1)\n end = Timestamp(2018, 1, 5)\n s = Series(date_range(start, end, tz=tz), name=name)\n result = s.describe()\n expected = Series(\n [\n 5,\n Timestamp(2018, 1, 3).tz_localize(tz),\n start.tz_localize(tz),\n s[1],\n s[2],\n s[3],\n end.tz_localize(tz),\n ],\n name=name,\n index=["count", "mean", "min", "25%", "50%", "75%", "max"],\n )\n tm.assert_series_equal(result, expected)\n\n def test_describe_with_tz_numeric(self):\n name = tz = "CET"\n start = Timestamp(2018, 1, 1)\n end = Timestamp(2018, 1, 5)\n s = Series(date_range(start, end, tz=tz), name=name)\n\n result = s.describe()\n\n expected = Series(\n [\n 5,\n Timestamp("2018-01-03 00:00:00", tz=tz),\n Timestamp("2018-01-01 00:00:00", tz=tz),\n Timestamp("2018-01-02 00:00:00", tz=tz),\n Timestamp("2018-01-03 00:00:00", tz=tz),\n Timestamp("2018-01-04 00:00:00", tz=tz),\n Timestamp("2018-01-05 00:00:00", tz=tz),\n ],\n name=name,\n index=["count", "mean", "min", "25%", "50%", "75%", "max"],\n )\n tm.assert_series_equal(result, expected)\n\n def test_datetime_is_numeric_includes_datetime(self):\n s = Series(date_range("2012", periods=3))\n result = s.describe()\n expected = Series(\n [\n 3,\n Timestamp("2012-01-02"),\n Timestamp("2012-01-01"),\n Timestamp("2012-01-01T12:00:00"),\n Timestamp("2012-01-02"),\n Timestamp("2012-01-02T12:00:00"),\n Timestamp("2012-01-03"),\n ],\n index=["count", "mean", 
"min", "25%", "50%", "75%", "max"],\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.filterwarnings("ignore:Casting complex values to real discards")\n def test_numeric_result_dtype(self, any_numeric_dtype):\n # GH#48340 - describe should always return float on non-complex numeric input\n if is_extension_array_dtype(any_numeric_dtype):\n dtype = "Float64"\n else:\n dtype = "complex128" if is_complex_dtype(any_numeric_dtype) else None\n\n ser = Series([0, 1], dtype=any_numeric_dtype)\n if dtype == "complex128" and np_version_gte1p25:\n with pytest.raises(\n TypeError, match=r"^a must be an array of real numbers$"\n ):\n ser.describe()\n return\n result = ser.describe()\n expected = Series(\n [\n 2.0,\n 0.5,\n ser.std(),\n 0,\n 0.25,\n 0.5,\n 0.75,\n 1.0,\n ],\n index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],\n dtype=dtype,\n )\n tm.assert_series_equal(result, expected)\n\n def test_describe_one_element_ea(self):\n # GH#52515\n ser = Series([0.0], dtype="Float64")\n with tm.assert_produces_warning(None):\n result = ser.describe()\n expected = Series(\n [1, 0, NA, 0, 0, 0, 0, 0],\n dtype="Float64",\n index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],\n )\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_describe.py | test_describe.py | Python | 6,646 | 0.95 | 0.073892 | 0.027174 | awesome-app | 320 | 2023-07-31T13:40:31.116543 | GPL-3.0 | true | 222e3b1a9d0a739bbeed3336c4c90132 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n Series,\n TimedeltaIndex,\n date_range,\n)\nimport pandas._testing as tm\n\n\nclass TestSeriesDiff:\n def test_diff_np(self):\n # TODO(__array_function__): could make np.diff return a Series\n # matching ser.diff()\n\n ser = Series(np.arange(5))\n\n res = np.diff(ser)\n expected = np.array([1, 1, 1, 1])\n tm.assert_numpy_array_equal(res, expected)\n\n def test_diff_int(self):\n # int dtype\n a = 10000000000000000\n b = a + 1\n ser = Series([a, b])\n\n result = ser.diff()\n assert result[1] == 1\n\n def test_diff_tz(self):\n # Combined datetime diff, normal diff and boolean diff test\n ts = Series(\n np.arange(10, dtype=np.float64),\n index=date_range("2020-01-01", periods=10),\n name="ts",\n )\n ts.diff()\n\n # neg n\n result = ts.diff(-1)\n expected = ts - ts.shift(-1)\n tm.assert_series_equal(result, expected)\n\n # 0\n result = ts.diff(0)\n expected = ts - ts\n tm.assert_series_equal(result, expected)\n\n def test_diff_dt64(self):\n # datetime diff (GH#3100)\n ser = Series(date_range("20130102", periods=5))\n result = ser.diff()\n expected = ser - ser.shift(1)\n tm.assert_series_equal(result, expected)\n\n # timedelta diff\n result = result - result.shift(1) # previous result\n expected = expected.diff() # previously expected\n tm.assert_series_equal(result, expected)\n\n def test_diff_dt64tz(self):\n # with tz\n ser = Series(\n date_range("2000-01-01 09:00:00", periods=5, tz="US/Eastern"), name="foo"\n )\n result = ser.diff()\n expected = Series(TimedeltaIndex(["NaT"] + ["1 days"] * 4), name="foo")\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "input,output,diff",\n [([False, True, True, False, False], [np.nan, True, False, True, False], 1)],\n )\n def test_diff_bool(self, input, output, diff):\n # boolean series (test for fixing #17294)\n ser = Series(input)\n result = ser.diff()\n expected = Series(output)\n tm.assert_series_equal(result, expected)\n\n def 
test_diff_object_dtype(self):\n # object series\n ser = Series([False, True, 5.0, np.nan, True, False])\n result = ser.diff()\n expected = ser - ser.shift(1)\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_diff.py | test_diff.py | Python | 2,538 | 0.95 | 0.102273 | 0.150685 | node-utils | 876 | 2024-04-13T20:04:21.318061 | BSD-3-Clause | true | c47c3cdcbd86d2a8d7b15a666bbc0df7 |
import pytest\n\nfrom pandas import (\n Index,\n Series,\n)\nimport pandas._testing as tm\nfrom pandas.api.types import is_bool_dtype\n\n\n@pytest.mark.parametrize(\n "data, index, drop_labels, axis, expected_data, expected_index",\n [\n # Unique Index\n ([1, 2], ["one", "two"], ["two"], 0, [1], ["one"]),\n ([1, 2], ["one", "two"], ["two"], "rows", [1], ["one"]),\n ([1, 1, 2], ["one", "two", "one"], ["two"], 0, [1, 2], ["one", "one"]),\n # GH 5248 Non-Unique Index\n ([1, 1, 2], ["one", "two", "one"], "two", 0, [1, 2], ["one", "one"]),\n ([1, 1, 2], ["one", "two", "one"], ["one"], 0, [1], ["two"]),\n ([1, 1, 2], ["one", "two", "one"], "one", 0, [1], ["two"]),\n ],\n)\ndef test_drop_unique_and_non_unique_index(\n data, index, axis, drop_labels, expected_data, expected_index\n):\n ser = Series(data=data, index=index)\n result = ser.drop(drop_labels, axis=axis)\n expected = Series(data=expected_data, index=expected_index)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data, index, drop_labels, axis, error_type, error_desc",\n [\n # single string/tuple-like\n (range(3), list("abc"), "bc", 0, KeyError, "not found in axis"),\n # bad axis\n (range(3), list("abc"), ("a",), 0, KeyError, "not found in axis"),\n (range(3), list("abc"), "one", "columns", ValueError, "No axis named columns"),\n ],\n)\ndef test_drop_exception_raised(data, index, drop_labels, axis, error_type, error_desc):\n ser = Series(data, index=index)\n with pytest.raises(error_type, match=error_desc):\n ser.drop(drop_labels, axis=axis)\n\n\ndef test_drop_with_ignore_errors():\n # errors='ignore'\n ser = Series(range(3), index=list("abc"))\n result = ser.drop("bc", errors="ignore")\n tm.assert_series_equal(result, ser)\n result = ser.drop(["a", "d"], errors="ignore")\n expected = ser.iloc[1:]\n tm.assert_series_equal(result, expected)\n\n # GH 8522\n ser = Series([2, 3], index=[True, False])\n assert is_bool_dtype(ser.index)\n assert ser.index.dtype == bool\n result = 
ser.drop(True)\n expected = Series([3], index=[False])\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("index", [[1, 2, 3], [1, 1, 3]])\n@pytest.mark.parametrize("drop_labels", [[], [1], [3]])\ndef test_drop_empty_list(index, drop_labels):\n # GH 21494\n expected_index = [i for i in index if i not in drop_labels]\n series = Series(index=index, dtype=object).drop(drop_labels)\n expected = Series(index=expected_index, dtype=object)\n tm.assert_series_equal(series, expected)\n\n\n@pytest.mark.parametrize(\n "data, index, drop_labels",\n [\n (None, [1, 2, 3], [1, 4]),\n (None, [1, 2, 2], [1, 4]),\n ([2, 3], [0, 1], [False, True]),\n ],\n)\ndef test_drop_non_empty_list(data, index, drop_labels):\n # GH 21494 and GH 16877\n dtype = object if data is None else None\n ser = Series(data=data, index=index, dtype=dtype)\n with pytest.raises(KeyError, match="not found in axis"):\n ser.drop(drop_labels)\n\n\ndef test_drop_index_ea_dtype(any_numeric_ea_dtype):\n # GH#45860\n df = Series(100, index=Index([1, 2, 2], dtype=any_numeric_ea_dtype))\n idx = Index([df.index[1]])\n result = df.drop(idx)\n expected = Series(100, index=Index([1], dtype=any_numeric_ea_dtype))\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_drop.py | test_drop.py | Python | 3,394 | 0.95 | 0.090909 | 0.105882 | react-lib | 741 | 2025-04-30T01:42:09.721258 | BSD-3-Clause | true | ee5f3e4c694ebcdf811860116c3241bd |
import numpy as np\nimport pytest\n\nfrom pandas import (\n DatetimeIndex,\n IntervalIndex,\n NaT,\n Period,\n Series,\n Timestamp,\n)\nimport pandas._testing as tm\n\n\nclass TestDropna:\n def test_dropna_empty(self):\n ser = Series([], dtype=object)\n\n assert len(ser.dropna()) == 0\n return_value = ser.dropna(inplace=True)\n assert return_value is None\n assert len(ser) == 0\n\n # invalid axis\n msg = "No axis named 1 for object type Series"\n with pytest.raises(ValueError, match=msg):\n ser.dropna(axis=1)\n\n def test_dropna_preserve_name(self, datetime_series):\n datetime_series[:5] = np.nan\n result = datetime_series.dropna()\n assert result.name == datetime_series.name\n name = datetime_series.name\n ts = datetime_series.copy()\n return_value = ts.dropna(inplace=True)\n assert return_value is None\n assert ts.name == name\n\n def test_dropna_no_nan(self):\n for ser in [\n Series([1, 2, 3], name="x"),\n Series([False, True, False], name="x"),\n ]:\n result = ser.dropna()\n tm.assert_series_equal(result, ser)\n assert result is not ser\n\n s2 = ser.copy()\n return_value = s2.dropna(inplace=True)\n assert return_value is None\n tm.assert_series_equal(s2, ser)\n\n def test_dropna_intervals(self):\n ser = Series(\n [np.nan, 1, 2, 3],\n IntervalIndex.from_arrays([np.nan, 0, 1, 2], [np.nan, 1, 2, 3]),\n )\n\n result = ser.dropna()\n expected = ser.iloc[1:]\n tm.assert_series_equal(result, expected)\n\n def test_dropna_period_dtype(self):\n # GH#13737\n ser = Series([Period("2011-01", freq="M"), Period("NaT", freq="M")])\n result = ser.dropna()\n expected = Series([Period("2011-01", freq="M")])\n\n tm.assert_series_equal(result, expected)\n\n def test_datetime64_tz_dropna(self, unit):\n # DatetimeLikeBlock\n ser = Series(\n [\n Timestamp("2011-01-01 10:00"),\n NaT,\n Timestamp("2011-01-03 10:00"),\n NaT,\n ],\n dtype=f"M8[{unit}]",\n )\n result = ser.dropna()\n expected = Series(\n [Timestamp("2011-01-01 10:00"), Timestamp("2011-01-03 10:00")],\n index=[0, 2],\n 
dtype=f"M8[{unit}]",\n )\n tm.assert_series_equal(result, expected)\n\n # DatetimeTZBlock\n idx = DatetimeIndex(\n ["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz="Asia/Tokyo"\n ).as_unit(unit)\n ser = Series(idx)\n assert ser.dtype == f"datetime64[{unit}, Asia/Tokyo]"\n result = ser.dropna()\n expected = Series(\n [\n Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),\n Timestamp("2011-01-03 10:00", tz="Asia/Tokyo"),\n ],\n index=[0, 2],\n dtype=f"datetime64[{unit}, Asia/Tokyo]",\n )\n assert result.dtype == f"datetime64[{unit}, Asia/Tokyo]"\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("val", [1, 1.5])\n def test_dropna_ignore_index(self, val):\n # GH#31725\n ser = Series([1, 2, val], index=[3, 2, 1])\n result = ser.dropna(ignore_index=True)\n expected = Series([1, 2, val])\n tm.assert_series_equal(result, expected)\n\n ser.dropna(ignore_index=True, inplace=True)\n tm.assert_series_equal(ser, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_dropna.py | test_dropna.py | Python | 3,577 | 0.95 | 0.08547 | 0.049505 | python-kit | 13 | 2024-05-02T17:20:09.542645 | MIT | true | 1d01ed45e81d054379dcd30e3c8dda43 |
import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n Series,\n)\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize(\n "keep, expected",\n [\n ("first", Series([False, False, False, False, True, True, False])),\n ("last", Series([False, True, True, False, False, False, False])),\n (False, Series([False, True, True, False, True, True, False])),\n ],\n)\ndef test_drop_duplicates(any_numpy_dtype, keep, expected):\n tc = Series([1, 0, 3, 5, 3, 0, 4], dtype=np.dtype(any_numpy_dtype))\n\n if tc.dtype == "bool":\n pytest.skip("tested separately in test_drop_duplicates_bool")\n\n tm.assert_series_equal(tc.duplicated(keep=keep), expected)\n tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])\n sc = tc.copy()\n return_value = sc.drop_duplicates(keep=keep, inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc[~expected])\n\n\n@pytest.mark.parametrize(\n "keep, expected",\n [\n ("first", Series([False, False, True, True])),\n ("last", Series([True, True, False, False])),\n (False, Series([True, True, True, True])),\n ],\n)\ndef test_drop_duplicates_bool(keep, expected):\n tc = Series([True, False, True, False])\n\n tm.assert_series_equal(tc.duplicated(keep=keep), expected)\n tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])\n sc = tc.copy()\n return_value = sc.drop_duplicates(keep=keep, inplace=True)\n tm.assert_series_equal(sc, tc[~expected])\n assert return_value is None\n\n\n@pytest.mark.parametrize("values", [[], list(range(5))])\ndef test_drop_duplicates_no_duplicates(any_numpy_dtype, keep, values):\n tc = Series(values, dtype=np.dtype(any_numpy_dtype))\n expected = Series([False] * len(tc), dtype="bool")\n\n if tc.dtype == "bool":\n # 0 -> False and 1-> True\n # any other value would be duplicated\n tc = tc[:2]\n expected = expected[:2]\n\n tm.assert_series_equal(tc.duplicated(keep=keep), expected)\n\n result_dropped = tc.drop_duplicates(keep=keep)\n 
tm.assert_series_equal(result_dropped, tc)\n\n # validate shallow copy\n assert result_dropped is not tc\n\n\nclass TestSeriesDropDuplicates:\n @pytest.fixture(\n params=["int_", "uint", "float64", "str_", "timedelta64[h]", "datetime64[D]"]\n )\n def dtype(self, request):\n return request.param\n\n @pytest.fixture\n def cat_series_unused_category(self, dtype, ordered):\n # Test case 1\n cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))\n\n input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))\n cat = Categorical(input1, categories=cat_array, ordered=ordered)\n tc1 = Series(cat)\n return tc1\n\n def test_drop_duplicates_categorical_non_bool(self, cat_series_unused_category):\n tc1 = cat_series_unused_category\n\n expected = Series([False, False, False, True])\n\n result = tc1.duplicated()\n tm.assert_series_equal(result, expected)\n\n result = tc1.drop_duplicates()\n tm.assert_series_equal(result, tc1[~expected])\n\n sc = tc1.copy()\n return_value = sc.drop_duplicates(inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc1[~expected])\n\n def test_drop_duplicates_categorical_non_bool_keeplast(\n self, cat_series_unused_category\n ):\n tc1 = cat_series_unused_category\n\n expected = Series([False, False, True, False])\n\n result = tc1.duplicated(keep="last")\n tm.assert_series_equal(result, expected)\n\n result = tc1.drop_duplicates(keep="last")\n tm.assert_series_equal(result, tc1[~expected])\n\n sc = tc1.copy()\n return_value = sc.drop_duplicates(keep="last", inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc1[~expected])\n\n def test_drop_duplicates_categorical_non_bool_keepfalse(\n self, cat_series_unused_category\n ):\n tc1 = cat_series_unused_category\n\n expected = Series([False, False, True, True])\n\n result = tc1.duplicated(keep=False)\n tm.assert_series_equal(result, expected)\n\n result = tc1.drop_duplicates(keep=False)\n tm.assert_series_equal(result, tc1[~expected])\n\n sc = tc1.copy()\n 
return_value = sc.drop_duplicates(keep=False, inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc1[~expected])\n\n @pytest.fixture\n def cat_series(self, dtype, ordered):\n # no unused categories, unlike cat_series_unused_category\n cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))\n\n input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))\n cat = Categorical(input2, categories=cat_array, ordered=ordered)\n tc2 = Series(cat)\n return tc2\n\n def test_drop_duplicates_categorical_non_bool2(self, cat_series):\n tc2 = cat_series\n\n expected = Series([False, False, False, False, True, True, False])\n\n result = tc2.duplicated()\n tm.assert_series_equal(result, expected)\n\n result = tc2.drop_duplicates()\n tm.assert_series_equal(result, tc2[~expected])\n\n sc = tc2.copy()\n return_value = sc.drop_duplicates(inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc2[~expected])\n\n def test_drop_duplicates_categorical_non_bool2_keeplast(self, cat_series):\n tc2 = cat_series\n\n expected = Series([False, True, True, False, False, False, False])\n\n result = tc2.duplicated(keep="last")\n tm.assert_series_equal(result, expected)\n\n result = tc2.drop_duplicates(keep="last")\n tm.assert_series_equal(result, tc2[~expected])\n\n sc = tc2.copy()\n return_value = sc.drop_duplicates(keep="last", inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc2[~expected])\n\n def test_drop_duplicates_categorical_non_bool2_keepfalse(self, cat_series):\n tc2 = cat_series\n\n expected = Series([False, True, True, False, True, True, False])\n\n result = tc2.duplicated(keep=False)\n tm.assert_series_equal(result, expected)\n\n result = tc2.drop_duplicates(keep=False)\n tm.assert_series_equal(result, tc2[~expected])\n\n sc = tc2.copy()\n return_value = sc.drop_duplicates(keep=False, inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc2[~expected])\n\n def 
test_drop_duplicates_categorical_bool(self, ordered):\n tc = Series(\n Categorical(\n [True, False, True, False], categories=[True, False], ordered=ordered\n )\n )\n\n expected = Series([False, False, True, True])\n tm.assert_series_equal(tc.duplicated(), expected)\n tm.assert_series_equal(tc.drop_duplicates(), tc[~expected])\n sc = tc.copy()\n return_value = sc.drop_duplicates(inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc[~expected])\n\n expected = Series([True, True, False, False])\n tm.assert_series_equal(tc.duplicated(keep="last"), expected)\n tm.assert_series_equal(tc.drop_duplicates(keep="last"), tc[~expected])\n sc = tc.copy()\n return_value = sc.drop_duplicates(keep="last", inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc[~expected])\n\n expected = Series([True, True, True, True])\n tm.assert_series_equal(tc.duplicated(keep=False), expected)\n tm.assert_series_equal(tc.drop_duplicates(keep=False), tc[~expected])\n sc = tc.copy()\n return_value = sc.drop_duplicates(keep=False, inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc[~expected])\n\n def test_drop_duplicates_categorical_bool_na(self, nulls_fixture):\n # GH#44351\n ser = Series(\n Categorical(\n [True, False, True, False, nulls_fixture],\n categories=[True, False],\n ordered=True,\n )\n )\n result = ser.drop_duplicates()\n expected = Series(\n Categorical([True, False, np.nan], categories=[True, False], ordered=True),\n index=[0, 1, 4],\n )\n tm.assert_series_equal(result, expected)\n\n def test_drop_duplicates_ignore_index(self):\n # GH#48304\n ser = Series([1, 2, 2, 3])\n result = ser.drop_duplicates(ignore_index=True)\n expected = Series([1, 2, 3])\n tm.assert_series_equal(result, expected)\n\n def test_duplicated_arrow_dtype(self):\n pytest.importorskip("pyarrow")\n ser = Series([True, False, None, False], dtype="bool[pyarrow]")\n result = ser.drop_duplicates()\n expected = Series([True, False, None], 
dtype="bool[pyarrow]")\n tm.assert_series_equal(result, expected)\n\n def test_drop_duplicates_arrow_strings(self):\n # GH#54904\n pa = pytest.importorskip("pyarrow")\n ser = Series(["a", "a"], dtype=pd.ArrowDtype(pa.string()))\n result = ser.drop_duplicates()\n expecetd = Series(["a"], dtype=pd.ArrowDtype(pa.string()))\n tm.assert_series_equal(result, expecetd)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_drop_duplicates.py | test_drop_duplicates.py | Python | 9,235 | 0.95 | 0.074906 | 0.038278 | node-utils | 77 | 2024-08-07T22:37:49.054099 | GPL-3.0 | true | 222004c451444b9fd45e9041d7cac406 |
import numpy as np\n\n\nclass TestSeriesDtypes:\n def test_dtype(self, datetime_series):\n assert datetime_series.dtype == np.dtype("float64")\n assert datetime_series.dtypes == np.dtype("float64")\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_dtypes.py | test_dtypes.py | Python | 209 | 0.85 | 0.285714 | 0 | react-lib | 111 | 2025-03-05T23:53:43.097062 | MIT | true | 228a54c5d7157b2576be19a0cfbc718d |
import numpy as np\nimport pytest\n\nfrom pandas import (\n NA,\n Categorical,\n Series,\n)\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize(\n "keep, expected",\n [\n ("first", Series([False, False, True, False, True], name="name")),\n ("last", Series([True, True, False, False, False], name="name")),\n (False, Series([True, True, True, False, True], name="name")),\n ],\n)\ndef test_duplicated_keep(keep, expected):\n ser = Series(["a", "b", "b", "c", "a"], name="name")\n\n result = ser.duplicated(keep=keep)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "keep, expected",\n [\n ("first", Series([False, False, True, False, True])),\n ("last", Series([True, True, False, False, False])),\n (False, Series([True, True, True, False, True])),\n ],\n)\ndef test_duplicated_nan_none(keep, expected):\n ser = Series([np.nan, 3, 3, None, np.nan], dtype=object)\n\n result = ser.duplicated(keep=keep)\n tm.assert_series_equal(result, expected)\n\n\ndef test_duplicated_categorical_bool_na(nulls_fixture):\n # GH#44351\n ser = Series(\n Categorical(\n [True, False, True, False, nulls_fixture],\n categories=[True, False],\n ordered=True,\n )\n )\n result = ser.duplicated()\n expected = Series([False, False, True, True, False])\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "keep, vals",\n [\n ("last", [True, True, False]),\n ("first", [False, True, True]),\n (False, [True, True, True]),\n ],\n)\ndef test_duplicated_mask(keep, vals):\n # GH#48150\n ser = Series([1, 2, NA, NA, NA], dtype="Int64")\n result = ser.duplicated(keep=keep)\n expected = Series([False, False] + vals)\n tm.assert_series_equal(result, expected)\n\n\ndef test_duplicated_mask_no_duplicated_na(keep):\n # GH#48150\n ser = Series([1, 2, NA], dtype="Int64")\n result = ser.duplicated(keep=keep)\n expected = Series([False, False, False])\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_duplicated.py | 
test_duplicated.py | Python | 2,059 | 0.95 | 0.064935 | 0.046875 | react-lib | 15 | 2023-07-14T01:58:14.463765 | MIT | true | 8db6e5c58112e20acacf7dd296941a95 |
from contextlib import nullcontext
import copy

import numpy as np
import pytest

from pandas._libs.missing import is_matching_na
from pandas.compat.numpy import np_version_gte1p25

from pandas.core.dtypes.common import is_float

from pandas import (
    Index,
    MultiIndex,
    Series,
)
import pandas._testing as tm


@pytest.mark.parametrize(
    "arr, idx",
    [
        ([1, 2, 3, 4], [0, 2, 1, 3]),
        ([1, np.nan, 3, np.nan], [0, 2, 1, 3]),
        (
            [1, np.nan, 3, np.nan],
            MultiIndex.from_tuples([(0, "a"), (1, "b"), (2, "c"), (3, "c")]),
        ),
    ],
)
def test_equals(arr, idx):
    # equals is True for a copy and becomes False once a value diverges
    s1 = Series(arr, index=idx)
    s2 = s1.copy()
    assert s1.equals(s2)

    s1[1] = 9
    assert not s1.equals(s2)


@pytest.mark.parametrize(
    "val", [1, 1.1, 1 + 1j, True, "abc", [1, 2], (1, 2), {1, 2}, {"a": 1}, None]
)
def test_equals_list_array(val):
    # GH20676 Verify equals operator for list of Numpy arrays
    arr = np.array([1, 2])
    s1 = Series([arr, arr])
    s2 = s1.copy()
    assert s1.equals(s2)

    s1[1] = val

    # older numpy emits a FutureWarning when comparing an array to a str;
    # gated on np_version_gte1p25 so the test passes on both sides
    cm = (
        tm.assert_produces_warning(FutureWarning, check_stacklevel=False)
        if isinstance(val, str) and not np_version_gte1p25
        else nullcontext()
    )
    with cm:
        assert not s1.equals(s2)


def test_equals_false_negative():
    # GH8437 Verify false negative behavior of equals function for dtype object
    arr = [False, np.nan]
    s1 = Series(arr)
    s2 = s1.copy()
    s3 = Series(index=range(2), dtype=object)
    s4 = s3.copy()
    s5 = s3.copy()
    s6 = s3.copy()

    # chained assignment fills position 0 (and 0..-2) with False in each copy
    s3[:-1] = s4[:-1] = s5[0] = s6[0] = False
    assert s1.equals(s1)
    assert s1.equals(s2)
    assert s1.equals(s3)
    assert s1.equals(s4)
    assert s1.equals(s5)
    assert s5.equals(s6)


def test_equals_matching_nas():
    # matching but not identical NAs
    left = Series([np.datetime64("NaT")], dtype=object)
    right = Series([np.datetime64("NaT")], dtype=object)
    assert left.equals(right)
    # Index() infers a datetime-like dtype from the object NaT, which warns
    with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
        assert Index(left).equals(Index(right))
    assert left.array.equals(right.array)

    left = Series([np.timedelta64("NaT")], dtype=object)
    right = Series([np.timedelta64("NaT")], dtype=object)
    assert left.equals(right)
    with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
        assert Index(left).equals(Index(right))
    assert left.array.equals(right.array)

    # float NaN triggers no dtype-inference warning; pass dtype explicitly
    left = Series([np.float64("NaN")], dtype=object)
    right = Series([np.float64("NaN")], dtype=object)
    assert left.equals(right)
    assert Index(left, dtype=left.dtype).equals(Index(right, dtype=right.dtype))
    assert left.array.equals(right.array)


def test_equals_mismatched_nas(nulls_fixture, nulls_fixture2):
    # GH#39650
    left = nulls_fixture
    right = nulls_fixture2
    # work on a copy so the fixture object itself is never mutated/shared
    if hasattr(right, "copy"):
        right = right.copy()
    else:
        right = copy.copy(right)

    ser = Series([left], dtype=object)
    ser2 = Series([right], dtype=object)

    if is_matching_na(left, right):
        assert ser.equals(ser2)
    elif (left is None and is_float(right)) or (right is None and is_float(left)):
        # None and float NaN are considered equivalent missing values here
        assert ser.equals(ser2)
    else:
        assert not ser.equals(ser2)


def test_equals_none_vs_nan():
    # GH#39650: None and np.nan compare as matching NAs in object dtype
    ser = Series([1, None], dtype=object)
    ser2 = Series([1, np.nan], dtype=object)

    assert ser.equals(ser2)
    assert Index(ser, dtype=ser.dtype).equals(Index(ser2, dtype=ser2.dtype))
    assert ser.array.equals(ser2.array)


def test_equals_None_vs_float():
    # GH#44190
    left = Series([-np.inf, np.nan, -1.0, 0.0, 1.0, 10 / 3, np.inf], dtype=object)
    right = Series([None] * len(left))

    # these series were found to be equal due to a bug, check that they are correctly
    # found to not equal
    assert not left.equals(right)
    assert not right.equals(left)
    assert not left.to_frame().equals(right.to_frame())
    assert not right.to_frame().equals(left.to_frame())
    assert not Index(left, dtype="object").equals(Index(right, dtype="object"))
    assert not Index(right, dtype="object").equals(Index(left, dtype="object"))
.venv\Lib\site-packages\pandas\tests\series\methods\test_equals.py | test_equals.py | Python | 4,182 | 0.95 | 0.089655 | 0.068376 | node-utils | 458 | 2024-07-31T06:33:58.012916 | Apache-2.0 | true | abb2be29c5600ecf6e60cf598adb1b9f |
import numpy as np
import pytest

import pandas as pd
import pandas._testing as tm


def test_basic():
    # each list element becomes a row; the index label is repeated
    ser = pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd"), name="foo")
    res = ser.explode()
    exp = pd.Series(
        [0, 1, 2, np.nan, np.nan, 3, 4], index=list("aaabcdd"), dtype=object, name="foo"
    )
    tm.assert_series_equal(res, exp)


def test_mixed_type():
    # lists, arrays, Series and missing values may be mixed in one column
    ser = pd.Series(
        [[0, 1, 2], np.nan, None, np.array([]), pd.Series(["a", "b"])], name="foo"
    )
    res = ser.explode()
    exp = pd.Series(
        [0, 1, 2, np.nan, None, np.nan, "a", "b"],
        index=[0, 0, 0, 1, 2, 3, 4, 4],
        dtype=object,
        name="foo",
    )
    tm.assert_series_equal(res, exp)


def test_empty():
    # exploding an empty Series returns an (equal) copy
    ser = pd.Series(dtype=object)
    tm.assert_series_equal(ser.explode(), ser.copy())


def test_nested_lists():
    # only one level of nesting is unwrapped per call
    ser = pd.Series([[[1, 2, 3]], [1, 2], 1])
    res = ser.explode()
    tm.assert_series_equal(res, pd.Series([[1, 2, 3], 1, 2, 1], index=[0, 1, 1, 2]))


def test_multi_index():
    ser = pd.Series(
        [[0, 1, 2], np.nan, [], (3, 4)],
        name="foo",
        index=pd.MultiIndex.from_product([list("ab"), range(2)], names=["foo", "bar"]),
    )
    res = ser.explode()
    exp_index = pd.MultiIndex.from_tuples(
        [("a", 0), ("a", 0), ("a", 0), ("a", 1), ("b", 0), ("b", 1), ("b", 1)],
        names=["foo", "bar"],
    )
    exp = pd.Series(
        [0, 1, 2, np.nan, np.nan, 3, 4], index=exp_index, dtype=object, name="foo"
    )
    tm.assert_series_equal(res, exp)


def test_large():
    # re-exploding an already-exploded (scalar-valued) Series is a no-op
    ser = pd.Series([range(256)]).explode()
    res = ser.explode()
    tm.assert_series_equal(res, ser)


def test_invert_array():
    df = pd.DataFrame({"a": pd.date_range("20190101", periods=3, tz="UTC")})

    listify = df.apply(lambda x: x.array, axis=1)
    res = listify.explode()
    tm.assert_series_equal(res, df["a"].rename())


@pytest.mark.parametrize(
    "s", [pd.Series([1, 2, 3]), pd.Series(pd.date_range("2019", periods=3, tz="UTC"))]
)
def test_non_object_dtype(s):
    # exploding a non-object Series is a no-op
    tm.assert_series_equal(s.explode(), s)


def test_typical_usecase():
    # split-then-explode, joined back against the remaining columns
    df = pd.DataFrame(
        [{"var1": "a,b,c", "var2": 1}, {"var1": "d,e,f", "var2": 2}],
        columns=["var1", "var2"],
    )
    exploded = df.var1.str.split(",").explode()
    res = df[["var2"]].join(exploded)
    exp = pd.DataFrame(
        {"var2": [1, 1, 1, 2, 2, 2], "var1": list("abcdef")},
        columns=["var2", "var1"],
        index=[0, 0, 0, 1, 1, 1],
    )
    tm.assert_frame_equal(res, exp)


def test_nested_EA():
    # a nested EA array
    ser = pd.Series(
        [
            pd.date_range("20170101", periods=3, tz="UTC"),
            pd.date_range("20170104", periods=3, tz="UTC"),
        ]
    )
    res = ser.explode()
    exp = pd.Series(
        pd.date_range("20170101", periods=6, tz="UTC"), index=[0, 0, 0, 1, 1, 1]
    )
    tm.assert_series_equal(res, exp)


def test_duplicate_index():
    # GH 28005
    ser = pd.Series([[1, 2], [3, 4]], index=[0, 0])
    res = ser.explode()
    exp = pd.Series([1, 2, 3, 4], index=[0, 0, 0, 0], dtype=object)
    tm.assert_series_equal(res, exp)


def test_ignore_index():
    # GH 34932
    res = pd.Series([[1, 2], [3, 4]]).explode(ignore_index=True)
    exp = pd.Series([1, 2, 3, 4], index=[0, 1, 2, 3], dtype=object)
    tm.assert_series_equal(res, exp)


def test_explode_sets():
    # https://github.com/pandas-dev/pandas/issues/35614
    ser = pd.Series([{"a", "b", "c"}], index=[1])
    res = ser.explode().sort_values()
    tm.assert_series_equal(res, pd.Series(["a", "b", "c"], index=[1, 1, 1]))


def test_explode_scalars_can_ignore_index():
    # https://github.com/pandas-dev/pandas/issues/40487
    ser = pd.Series([1, 2, 3], index=["a", "b", "c"])
    res = ser.explode(ignore_index=True)
    tm.assert_series_equal(res, pd.Series([1, 2, 3]))


@pytest.mark.parametrize("ignore_index", [True, False])
def test_explode_pyarrow_list_type(ignore_index):
    # GH 53602
    pa = pytest.importorskip("pyarrow")

    data = [
        [None, None],
        [1],
        [],
        [2, 3],
        None,
    ]
    ser = pd.Series(data, dtype=pd.ArrowDtype(pa.list_(pa.int64())))
    res = ser.explode(ignore_index=ignore_index)
    exp = pd.Series(
        data=[None, None, 1, None, 2, 3, None],
        index=None if ignore_index else [0, 0, 1, 2, 3, 3, 4],
        dtype=pd.ArrowDtype(pa.int64()),
    )
    tm.assert_series_equal(res, exp)


@pytest.mark.parametrize("ignore_index", [True, False])
def test_explode_pyarrow_non_list_type(ignore_index):
    pa = pytest.importorskip("pyarrow")
    ser = pd.Series([1, 2, 3], dtype=pd.ArrowDtype(pa.int64()))
    res = ser.explode(ignore_index=ignore_index)
    exp = pd.Series([1, 2, 3], dtype="int64[pyarrow]", index=[0, 1, 2])
    tm.assert_series_equal(res, exp)
from datetime import (\n datetime,\n timedelta,\n timezone,\n)\n\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas import (\n Categorical,\n DataFrame,\n DatetimeIndex,\n NaT,\n Period,\n Series,\n Timedelta,\n Timestamp,\n date_range,\n isna,\n timedelta_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import period_array\n\n\n@pytest.mark.filterwarnings(\n "ignore:(Series|DataFrame).fillna with 'method' is deprecated:FutureWarning"\n)\nclass TestSeriesFillNA:\n def test_fillna_nat(self):\n series = Series([0, 1, 2, NaT._value], dtype="M8[ns]")\n\n filled = series.fillna(method="pad")\n filled2 = series.fillna(value=series.values[2])\n\n expected = series.copy()\n expected.iloc[3] = expected.iloc[2]\n\n tm.assert_series_equal(filled, expected)\n tm.assert_series_equal(filled2, expected)\n\n df = DataFrame({"A": series})\n filled = df.fillna(method="pad")\n filled2 = df.fillna(value=series.values[2])\n expected = DataFrame({"A": expected})\n tm.assert_frame_equal(filled, expected)\n tm.assert_frame_equal(filled2, expected)\n\n series = Series([NaT._value, 0, 1, 2], dtype="M8[ns]")\n\n filled = series.fillna(method="bfill")\n filled2 = series.fillna(value=series[1])\n\n expected = series.copy()\n expected[0] = expected[1]\n\n tm.assert_series_equal(filled, expected)\n tm.assert_series_equal(filled2, expected)\n\n df = DataFrame({"A": series})\n filled = df.fillna(method="bfill")\n filled2 = df.fillna(value=series[1])\n expected = DataFrame({"A": expected})\n tm.assert_frame_equal(filled, expected)\n tm.assert_frame_equal(filled2, expected)\n\n def test_fillna_value_or_method(self, datetime_series):\n msg = "Cannot specify both 'value' and 'method'"\n with pytest.raises(ValueError, match=msg):\n datetime_series.fillna(value=0, method="ffill")\n\n def test_fillna(self):\n ts = Series(\n [0.0, 1.0, 2.0, 3.0, 4.0], index=date_range("2020-01-01", periods=5)\n )\n\n tm.assert_series_equal(ts, ts.fillna(method="ffill"))\n\n ts.iloc[2] = 
np.nan\n\n exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)\n tm.assert_series_equal(ts.fillna(method="ffill"), exp)\n\n exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)\n tm.assert_series_equal(ts.fillna(method="backfill"), exp)\n\n exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)\n tm.assert_series_equal(ts.fillna(value=5), exp)\n\n msg = "Must specify a fill 'value' or 'method'"\n with pytest.raises(ValueError, match=msg):\n ts.fillna()\n\n def test_fillna_nonscalar(self):\n # GH#5703\n s1 = Series([np.nan])\n s2 = Series([1])\n result = s1.fillna(s2)\n expected = Series([1.0])\n tm.assert_series_equal(result, expected)\n result = s1.fillna({})\n tm.assert_series_equal(result, s1)\n result = s1.fillna(Series((), dtype=object))\n tm.assert_series_equal(result, s1)\n result = s2.fillna(s1)\n tm.assert_series_equal(result, s2)\n result = s1.fillna({0: 1})\n tm.assert_series_equal(result, expected)\n result = s1.fillna({1: 1})\n tm.assert_series_equal(result, Series([np.nan]))\n result = s1.fillna({0: 1, 1: 1})\n tm.assert_series_equal(result, expected)\n result = s1.fillna(Series({0: 1, 1: 1}))\n tm.assert_series_equal(result, expected)\n result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))\n tm.assert_series_equal(result, s1)\n\n def test_fillna_aligns(self):\n s1 = Series([0, 1, 2], list("abc"))\n s2 = Series([0, np.nan, 2], list("bac"))\n result = s2.fillna(s1)\n expected = Series([0, 0, 2.0], list("bac"))\n tm.assert_series_equal(result, expected)\n\n def test_fillna_limit(self):\n ser = Series(np.nan, index=[0, 1, 2])\n result = ser.fillna(999, limit=1)\n expected = Series([999, np.nan, np.nan], index=[0, 1, 2])\n tm.assert_series_equal(result, expected)\n\n result = ser.fillna(999, limit=2)\n expected = Series([999, 999, np.nan], index=[0, 1, 2])\n tm.assert_series_equal(result, expected)\n\n def test_fillna_dont_cast_strings(self):\n # GH#9043\n # make sure a string representation of int/float values can be filled\n # correctly without 
raising errors or being converted\n vals = ["0", "1.5", "-0.3"]\n for val in vals:\n ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")\n result = ser.fillna(val)\n expected = Series([0, 1, val, val, 4], dtype="object")\n tm.assert_series_equal(result, expected)\n\n def test_fillna_consistency(self):\n # GH#16402\n # fillna with a tz aware to a tz-naive, should result in object\n\n ser = Series([Timestamp("20130101"), NaT])\n\n result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))\n expected = Series(\n [Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],\n dtype="object",\n )\n tm.assert_series_equal(result, expected)\n\n result = ser.where([True, False], Timestamp("20130101", tz="US/Eastern"))\n tm.assert_series_equal(result, expected)\n\n result = ser.where([True, False], Timestamp("20130101", tz="US/Eastern"))\n tm.assert_series_equal(result, expected)\n\n # with a non-datetime\n result = ser.fillna("foo")\n expected = Series([Timestamp("20130101"), "foo"])\n tm.assert_series_equal(result, expected)\n\n # assignment\n ser2 = ser.copy()\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n ser2[1] = "foo"\n tm.assert_series_equal(ser2, expected)\n\n def test_fillna_downcast(self):\n # GH#15277\n # infer int64 from float64\n ser = Series([1.0, np.nan])\n msg = "The 'downcast' keyword in fillna is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = ser.fillna(0, downcast="infer")\n expected = Series([1, 0])\n tm.assert_series_equal(result, expected)\n\n # infer int64 from float64 when fillna value is a dict\n ser = Series([1.0, np.nan])\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = ser.fillna({1: 0}, downcast="infer")\n expected = Series([1, 0])\n tm.assert_series_equal(result, expected)\n\n def test_fillna_downcast_infer_objects_to_numeric(self):\n # GH#44241 if we have object-dtype, 'downcast="infer"' should\n # _actually_ infer\n\n arr = 
np.arange(5).astype(object)\n arr[3] = np.nan\n\n ser = Series(arr)\n\n msg = "The 'downcast' keyword in fillna is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = ser.fillna(3, downcast="infer")\n expected = Series(np.arange(5), dtype=np.int64)\n tm.assert_series_equal(res, expected)\n\n msg = "The 'downcast' keyword in ffill is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = ser.ffill(downcast="infer")\n expected = Series([0, 1, 2, 2, 4], dtype=np.int64)\n tm.assert_series_equal(res, expected)\n\n msg = "The 'downcast' keyword in bfill is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = ser.bfill(downcast="infer")\n expected = Series([0, 1, 2, 4, 4], dtype=np.int64)\n tm.assert_series_equal(res, expected)\n\n # with a non-round float present, we will downcast to float64\n ser[2] = 2.5\n\n expected = Series([0, 1, 2.5, 3, 4], dtype=np.float64)\n msg = "The 'downcast' keyword in fillna is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = ser.fillna(3, downcast="infer")\n tm.assert_series_equal(res, expected)\n\n msg = "The 'downcast' keyword in ffill is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = ser.ffill(downcast="infer")\n expected = Series([0, 1, 2.5, 2.5, 4], dtype=np.float64)\n tm.assert_series_equal(res, expected)\n\n msg = "The 'downcast' keyword in bfill is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = ser.bfill(downcast="infer")\n expected = Series([0, 1, 2.5, 4, 4], dtype=np.float64)\n tm.assert_series_equal(res, expected)\n\n def test_timedelta_fillna(self, frame_or_series, unit):\n # GH#3371\n ser = Series(\n [\n Timestamp("20130101"),\n Timestamp("20130101"),\n Timestamp("20130102"),\n Timestamp("20130103 9:01:01"),\n ],\n dtype=f"M8[{unit}]",\n )\n td = ser.diff()\n obj = frame_or_series(td).copy()\n\n # reg fillna\n result = 
obj.fillna(Timedelta(seconds=0))\n expected = Series(\n [\n timedelta(0),\n timedelta(0),\n timedelta(1),\n timedelta(days=1, seconds=9 * 3600 + 60 + 1),\n ],\n dtype=f"m8[{unit}]",\n )\n expected = frame_or_series(expected)\n tm.assert_equal(result, expected)\n\n # GH#45746 pre-1.? ints were interpreted as seconds. then that was\n # deprecated and changed to raise. In 2.0 it casts to common dtype,\n # consistent with every other dtype's behavior\n res = obj.fillna(1)\n expected = obj.astype(object).fillna(1)\n tm.assert_equal(res, expected)\n\n result = obj.fillna(Timedelta(seconds=1))\n expected = Series(\n [\n timedelta(seconds=1),\n timedelta(0),\n timedelta(1),\n timedelta(days=1, seconds=9 * 3600 + 60 + 1),\n ],\n dtype=f"m8[{unit}]",\n )\n expected = frame_or_series(expected)\n tm.assert_equal(result, expected)\n\n result = obj.fillna(timedelta(days=1, seconds=1))\n expected = Series(\n [\n timedelta(days=1, seconds=1),\n timedelta(0),\n timedelta(1),\n timedelta(days=1, seconds=9 * 3600 + 60 + 1),\n ],\n dtype=f"m8[{unit}]",\n )\n expected = frame_or_series(expected)\n tm.assert_equal(result, expected)\n\n result = obj.fillna(np.timedelta64(10**9))\n expected = Series(\n [\n timedelta(seconds=1),\n timedelta(0),\n timedelta(1),\n timedelta(days=1, seconds=9 * 3600 + 60 + 1),\n ],\n dtype=f"m8[{unit}]",\n )\n expected = frame_or_series(expected)\n tm.assert_equal(result, expected)\n\n result = obj.fillna(NaT)\n expected = Series(\n [\n NaT,\n timedelta(0),\n timedelta(1),\n timedelta(days=1, seconds=9 * 3600 + 60 + 1),\n ],\n dtype=f"m8[{unit}]",\n )\n expected = frame_or_series(expected)\n tm.assert_equal(result, expected)\n\n # ffill\n td[2] = np.nan\n obj = frame_or_series(td).copy()\n result = obj.ffill()\n expected = td.fillna(Timedelta(seconds=0))\n expected[0] = np.nan\n expected = frame_or_series(expected)\n\n tm.assert_equal(result, expected)\n\n # bfill\n td[2] = np.nan\n obj = frame_or_series(td)\n result = obj.bfill()\n expected = 
td.fillna(Timedelta(seconds=0))\n expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)\n expected = frame_or_series(expected)\n tm.assert_equal(result, expected)\n\n def test_datetime64_fillna(self):\n ser = Series(\n [\n Timestamp("20130101"),\n Timestamp("20130101"),\n Timestamp("20130102"),\n Timestamp("20130103 9:01:01"),\n ]\n )\n ser[2] = np.nan\n\n # ffill\n result = ser.ffill()\n expected = Series(\n [\n Timestamp("20130101"),\n Timestamp("20130101"),\n Timestamp("20130101"),\n Timestamp("20130103 9:01:01"),\n ]\n )\n tm.assert_series_equal(result, expected)\n\n # bfill\n result = ser.bfill()\n expected = Series(\n [\n Timestamp("20130101"),\n Timestamp("20130101"),\n Timestamp("20130103 9:01:01"),\n Timestamp("20130103 9:01:01"),\n ]\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "scalar",\n [\n False,\n pytest.param(\n True,\n marks=pytest.mark.xfail(\n reason="GH#56410 scalar case not yet addressed"\n ),\n ),\n ],\n )\n @pytest.mark.parametrize("tz", [None, "UTC"])\n def test_datetime64_fillna_mismatched_reso_no_rounding(self, tz, scalar):\n # GH#56410\n dti = date_range("2016-01-01", periods=3, unit="s", tz=tz)\n item = Timestamp("2016-02-03 04:05:06.789", tz=tz)\n vec = date_range(item, periods=3, unit="ms")\n\n exp_dtype = "M8[ms]" if tz is None else "M8[ms, UTC]"\n expected = Series([item, dti[1], dti[2]], dtype=exp_dtype)\n\n ser = Series(dti)\n ser[0] = NaT\n ser2 = ser.copy()\n\n res = ser.fillna(item)\n res2 = ser2.fillna(Series(vec))\n\n if scalar:\n tm.assert_series_equal(res, expected)\n else:\n tm.assert_series_equal(res2, expected)\n\n @pytest.mark.parametrize(\n "scalar",\n [\n False,\n pytest.param(\n True,\n marks=pytest.mark.xfail(\n reason="GH#56410 scalar case not yet addressed"\n ),\n ),\n ],\n )\n def test_timedelta64_fillna_mismatched_reso_no_rounding(self, scalar):\n # GH#56410\n tdi = date_range("2016-01-01", periods=3, unit="s") - Timestamp("1970-01-01")\n item = Timestamp("2016-02-03 
04:05:06.789") - Timestamp("1970-01-01")\n vec = timedelta_range(item, periods=3, unit="ms")\n\n expected = Series([item, tdi[1], tdi[2]], dtype="m8[ms]")\n\n ser = Series(tdi)\n ser[0] = NaT\n ser2 = ser.copy()\n\n res = ser.fillna(item)\n res2 = ser2.fillna(Series(vec))\n\n if scalar:\n tm.assert_series_equal(res, expected)\n else:\n tm.assert_series_equal(res2, expected)\n\n def test_datetime64_fillna_backfill(self):\n # GH#6587\n # make sure that we are treating as integer when filling\n ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"], dtype="M8[ns]")\n\n expected = Series(\n [\n "2013-08-05 15:30:00.000001",\n "2013-08-05 15:30:00.000001",\n "2013-08-05 15:30:00.000001",\n ],\n dtype="M8[ns]",\n )\n result = ser.fillna(method="backfill")\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])\n def test_datetime64_tz_fillna(self, tz, unit):\n # DatetimeLikeBlock\n ser = Series(\n [\n Timestamp("2011-01-01 10:00"),\n NaT,\n Timestamp("2011-01-03 10:00"),\n NaT,\n ],\n dtype=f"M8[{unit}]",\n )\n null_loc = Series([False, True, False, True])\n\n result = ser.fillna(Timestamp("2011-01-02 10:00"))\n expected = Series(\n [\n Timestamp("2011-01-01 10:00"),\n Timestamp("2011-01-02 10:00"),\n Timestamp("2011-01-03 10:00"),\n Timestamp("2011-01-02 10:00"),\n ],\n dtype=f"M8[{unit}]",\n )\n tm.assert_series_equal(expected, result)\n # check s is not changed\n tm.assert_series_equal(isna(ser), null_loc)\n\n result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))\n expected = Series(\n [\n Timestamp("2011-01-01 10:00"),\n Timestamp("2011-01-02 10:00", tz=tz),\n Timestamp("2011-01-03 10:00"),\n Timestamp("2011-01-02 10:00", tz=tz),\n ]\n )\n tm.assert_series_equal(expected, result)\n tm.assert_series_equal(isna(ser), null_loc)\n\n result = ser.fillna("AAA")\n expected = Series(\n [\n Timestamp("2011-01-01 10:00"),\n "AAA",\n Timestamp("2011-01-03 10:00"),\n "AAA",\n ],\n dtype=object,\n )\n 
tm.assert_series_equal(expected, result)\n tm.assert_series_equal(isna(ser), null_loc)\n\n result = ser.fillna(\n {\n 1: Timestamp("2011-01-02 10:00", tz=tz),\n 3: Timestamp("2011-01-04 10:00"),\n }\n )\n expected = Series(\n [\n Timestamp("2011-01-01 10:00"),\n Timestamp("2011-01-02 10:00", tz=tz),\n Timestamp("2011-01-03 10:00"),\n Timestamp("2011-01-04 10:00"),\n ]\n )\n tm.assert_series_equal(expected, result)\n tm.assert_series_equal(isna(ser), null_loc)\n\n result = ser.fillna(\n {1: Timestamp("2011-01-02 10:00"), 3: Timestamp("2011-01-04 10:00")}\n )\n expected = Series(\n [\n Timestamp("2011-01-01 10:00"),\n Timestamp("2011-01-02 10:00"),\n Timestamp("2011-01-03 10:00"),\n Timestamp("2011-01-04 10:00"),\n ],\n dtype=f"M8[{unit}]",\n )\n tm.assert_series_equal(expected, result)\n tm.assert_series_equal(isna(ser), null_loc)\n\n # DatetimeTZBlock\n idx = DatetimeIndex(\n ["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz=tz\n ).as_unit(unit)\n ser = Series(idx)\n assert ser.dtype == f"datetime64[{unit}, {tz}]"\n tm.assert_series_equal(isna(ser), null_loc)\n\n result = ser.fillna(Timestamp("2011-01-02 10:00"))\n expected = Series(\n [\n Timestamp("2011-01-01 10:00", tz=tz),\n Timestamp("2011-01-02 10:00"),\n Timestamp("2011-01-03 10:00", tz=tz),\n Timestamp("2011-01-02 10:00"),\n ]\n )\n tm.assert_series_equal(expected, result)\n tm.assert_series_equal(isna(ser), null_loc)\n\n result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))\n idx = DatetimeIndex(\n [\n "2011-01-01 10:00",\n "2011-01-02 10:00",\n "2011-01-03 10:00",\n "2011-01-02 10:00",\n ],\n tz=tz,\n ).as_unit(unit)\n expected = Series(idx)\n tm.assert_series_equal(expected, result)\n tm.assert_series_equal(isna(ser), null_loc)\n\n result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz).to_pydatetime())\n idx = DatetimeIndex(\n [\n "2011-01-01 10:00",\n "2011-01-02 10:00",\n "2011-01-03 10:00",\n "2011-01-02 10:00",\n ],\n tz=tz,\n ).as_unit(unit)\n expected = Series(idx)\n 
tm.assert_series_equal(expected, result)\n tm.assert_series_equal(isna(ser), null_loc)\n\n result = ser.fillna("AAA")\n expected = Series(\n [\n Timestamp("2011-01-01 10:00", tz=tz),\n "AAA",\n Timestamp("2011-01-03 10:00", tz=tz),\n "AAA",\n ],\n dtype=object,\n )\n tm.assert_series_equal(expected, result)\n tm.assert_series_equal(isna(ser), null_loc)\n\n result = ser.fillna(\n {\n 1: Timestamp("2011-01-02 10:00", tz=tz),\n 3: Timestamp("2011-01-04 10:00"),\n }\n )\n expected = Series(\n [\n Timestamp("2011-01-01 10:00", tz=tz),\n Timestamp("2011-01-02 10:00", tz=tz),\n Timestamp("2011-01-03 10:00", tz=tz),\n Timestamp("2011-01-04 10:00"),\n ]\n )\n tm.assert_series_equal(expected, result)\n tm.assert_series_equal(isna(ser), null_loc)\n\n result = ser.fillna(\n {\n 1: Timestamp("2011-01-02 10:00", tz=tz),\n 3: Timestamp("2011-01-04 10:00", tz=tz),\n }\n )\n expected = Series(\n [\n Timestamp("2011-01-01 10:00", tz=tz),\n Timestamp("2011-01-02 10:00", tz=tz),\n Timestamp("2011-01-03 10:00", tz=tz),\n Timestamp("2011-01-04 10:00", tz=tz),\n ]\n ).dt.as_unit(unit)\n tm.assert_series_equal(expected, result)\n tm.assert_series_equal(isna(ser), null_loc)\n\n # filling with a naive/other zone, coerce to object\n result = ser.fillna(Timestamp("20130101"))\n expected = Series(\n [\n Timestamp("2011-01-01 10:00", tz=tz),\n Timestamp("2013-01-01"),\n Timestamp("2011-01-03 10:00", tz=tz),\n Timestamp("2013-01-01"),\n ]\n )\n tm.assert_series_equal(expected, result)\n tm.assert_series_equal(isna(ser), null_loc)\n\n # pre-2.0 fillna with mixed tzs would cast to object, in 2.0\n # it retains dtype.\n result = ser.fillna(Timestamp("20130101", tz="US/Pacific"))\n expected = Series(\n [\n Timestamp("2011-01-01 10:00", tz=tz),\n Timestamp("2013-01-01", tz="US/Pacific").tz_convert(tz),\n Timestamp("2011-01-03 10:00", tz=tz),\n Timestamp("2013-01-01", tz="US/Pacific").tz_convert(tz),\n ]\n ).dt.as_unit(unit)\n tm.assert_series_equal(expected, result)\n 
tm.assert_series_equal(isna(ser), null_loc)\n\n def test_fillna_dt64tz_with_method(self):\n # with timezone\n # GH#15855\n ser = Series([Timestamp("2012-11-11 00:00:00+01:00"), NaT])\n exp = Series(\n [\n Timestamp("2012-11-11 00:00:00+01:00"),\n Timestamp("2012-11-11 00:00:00+01:00"),\n ]\n )\n tm.assert_series_equal(ser.fillna(method="pad"), exp)\n\n ser = Series([NaT, Timestamp("2012-11-11 00:00:00+01:00")])\n exp = Series(\n [\n Timestamp("2012-11-11 00:00:00+01:00"),\n Timestamp("2012-11-11 00:00:00+01:00"),\n ]\n )\n tm.assert_series_equal(ser.fillna(method="bfill"), exp)\n\n def test_fillna_pytimedelta(self):\n # GH#8209\n ser = Series([np.nan, Timedelta("1 days")], index=["A", "B"])\n\n result = ser.fillna(timedelta(1))\n expected = Series(Timedelta("1 days"), index=["A", "B"])\n tm.assert_series_equal(result, expected)\n\n def test_fillna_period(self):\n # GH#13737\n ser = Series([Period("2011-01", freq="M"), Period("NaT", freq="M")])\n\n res = ser.fillna(Period("2012-01", freq="M"))\n exp = Series([Period("2011-01", freq="M"), Period("2012-01", freq="M")])\n tm.assert_series_equal(res, exp)\n assert res.dtype == "Period[M]"\n\n def test_fillna_dt64_timestamp(self, frame_or_series):\n ser = Series(\n [\n Timestamp("20130101"),\n Timestamp("20130101"),\n Timestamp("20130102"),\n Timestamp("20130103 9:01:01"),\n ]\n )\n ser[2] = np.nan\n obj = frame_or_series(ser)\n\n # reg fillna\n result = obj.fillna(Timestamp("20130104"))\n expected = Series(\n [\n Timestamp("20130101"),\n Timestamp("20130101"),\n Timestamp("20130104"),\n Timestamp("20130103 9:01:01"),\n ]\n )\n expected = frame_or_series(expected)\n tm.assert_equal(result, expected)\n\n result = obj.fillna(NaT)\n expected = obj\n tm.assert_equal(result, expected)\n\n def test_fillna_dt64_non_nao(self):\n # GH#27419\n ser = Series([Timestamp("2010-01-01"), NaT, Timestamp("2000-01-01")])\n val = np.datetime64("1975-04-05", "ms")\n\n result = ser.fillna(val)\n expected = Series(\n [Timestamp("2010-01-01"), 
Timestamp("1975-04-05"), Timestamp("2000-01-01")]\n )\n tm.assert_series_equal(result, expected)\n\n def test_fillna_numeric_inplace(self):\n x = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"])\n y = x.copy()\n\n return_value = y.fillna(value=0, inplace=True)\n assert return_value is None\n\n expected = x.fillna(value=0)\n tm.assert_series_equal(y, expected)\n\n # ---------------------------------------------------------------\n # CategoricalDtype\n\n @pytest.mark.parametrize(\n "fill_value, expected_output",\n [\n ("a", ["a", "a", "b", "a", "a"]),\n ({1: "a", 3: "b", 4: "b"}, ["a", "a", "b", "b", "b"]),\n ({1: "a"}, ["a", "a", "b", np.nan, np.nan]),\n ({1: "a", 3: "b"}, ["a", "a", "b", "b", np.nan]),\n (Series("a"), ["a", np.nan, "b", np.nan, np.nan]),\n (Series("a", index=[1]), ["a", "a", "b", np.nan, np.nan]),\n (Series({1: "a", 3: "b"}), ["a", "a", "b", "b", np.nan]),\n (Series(["a", "b"], index=[3, 4]), ["a", np.nan, "b", "a", "b"]),\n ],\n )\n def test_fillna_categorical(self, fill_value, expected_output):\n # GH#17033\n # Test fillna for a Categorical series\n data = ["a", np.nan, "b", np.nan, np.nan]\n ser = Series(Categorical(data, categories=["a", "b"]))\n exp = Series(Categorical(expected_output, categories=["a", "b"]))\n result = ser.fillna(fill_value)\n tm.assert_series_equal(result, exp)\n\n @pytest.mark.parametrize(\n "fill_value, expected_output",\n [\n (Series(["a", "b", "c", "d", "e"]), ["a", "b", "b", "d", "e"]),\n (Series(["b", "d", "a", "d", "a"]), ["a", "d", "b", "d", "a"]),\n (\n Series(\n Categorical(\n ["b", "d", "a", "d", "a"], categories=["b", "c", "d", "e", "a"]\n )\n ),\n ["a", "d", "b", "d", "a"],\n ),\n ],\n )\n def test_fillna_categorical_with_new_categories(self, fill_value, expected_output):\n # GH#26215\n data = ["a", np.nan, "b", np.nan, np.nan]\n ser = Series(Categorical(data, categories=["a", "b", "c", "d", "e"]))\n exp = Series(Categorical(expected_output, categories=["a", "b", "c", "d", "e"]))\n result = 
ser.fillna(fill_value)\n tm.assert_series_equal(result, exp)\n\n def test_fillna_categorical_raises(self):\n data = ["a", np.nan, "b", np.nan, np.nan]\n ser = Series(Categorical(data, categories=["a", "b"]))\n cat = ser._values\n\n msg = "Cannot setitem on a Categorical with a new category"\n with pytest.raises(TypeError, match=msg):\n ser.fillna("d")\n\n msg2 = "Length of 'value' does not match."\n with pytest.raises(ValueError, match=msg2):\n cat.fillna(Series("d"))\n\n with pytest.raises(TypeError, match=msg):\n ser.fillna({1: "d", 3: "a"})\n\n msg = '"value" parameter must be a scalar or dict, but you passed a "list"'\n with pytest.raises(TypeError, match=msg):\n ser.fillna(["a", "b"])\n\n msg = '"value" parameter must be a scalar or dict, but you passed a "tuple"'\n with pytest.raises(TypeError, match=msg):\n ser.fillna(("a", "b"))\n\n msg = (\n '"value" parameter must be a scalar, dict '\n 'or Series, but you passed a "DataFrame"'\n )\n with pytest.raises(TypeError, match=msg):\n ser.fillna(DataFrame({1: ["a"], 3: ["b"]}))\n\n @pytest.mark.parametrize("dtype", [float, "float32", "float64"])\n @pytest.mark.parametrize("fill_type", tm.ALL_REAL_NUMPY_DTYPES)\n @pytest.mark.parametrize("scalar", [True, False])\n def test_fillna_float_casting(self, dtype, fill_type, scalar):\n # GH-43424\n ser = Series([np.nan, 1.2], dtype=dtype)\n fill_values = Series([2, 2], dtype=fill_type)\n if scalar:\n fill_values = fill_values.dtype.type(2)\n\n result = ser.fillna(fill_values)\n expected = Series([2.0, 1.2], dtype=dtype)\n tm.assert_series_equal(result, expected)\n\n ser = Series([np.nan, 1.2], dtype=dtype)\n mask = ser.isna().to_numpy()\n ser[mask] = fill_values\n tm.assert_series_equal(ser, expected)\n\n ser = Series([np.nan, 1.2], dtype=dtype)\n ser.mask(mask, fill_values, inplace=True)\n tm.assert_series_equal(ser, expected)\n\n ser = Series([np.nan, 1.2], dtype=dtype)\n res = ser.where(~mask, fill_values)\n tm.assert_series_equal(res, expected)\n\n def 
test_fillna_f32_upcast_with_dict(self):\n # GH-43424\n ser = Series([np.nan, 1.2], dtype=np.float32)\n result = ser.fillna({0: 1})\n expected = Series([1.0, 1.2], dtype=np.float32)\n tm.assert_series_equal(result, expected)\n\n # ---------------------------------------------------------------\n # Invalid Usages\n\n def test_fillna_invalid_method(self, datetime_series):\n try:\n datetime_series.fillna(method="ffil")\n except ValueError as inst:\n assert "ffil" in str(inst)\n\n def test_fillna_listlike_invalid(self):\n ser = Series(np.random.default_rng(2).integers(-100, 100, 50))\n msg = '"value" parameter must be a scalar or dict, but you passed a "list"'\n with pytest.raises(TypeError, match=msg):\n ser.fillna([1, 2])\n\n msg = '"value" parameter must be a scalar or dict, but you passed a "tuple"'\n with pytest.raises(TypeError, match=msg):\n ser.fillna((1, 2))\n\n def test_fillna_method_and_limit_invalid(self):\n # related GH#9217, make sure limit is an int and greater than 0\n ser = Series([1, 2, 3, None])\n msg = "|".join(\n [\n r"Cannot specify both 'value' and 'method'\.",\n "Limit must be greater than 0",\n "Limit must be an integer",\n ]\n )\n for limit in [-1, 0, 1.0, 2.0]:\n for method in ["backfill", "bfill", "pad", "ffill", None]:\n with pytest.raises(ValueError, match=msg):\n ser.fillna(1, limit=limit, method=method)\n\n def test_fillna_datetime64_with_timezone_tzinfo(self):\n # https://github.com/pandas-dev/pandas/issues/38851\n # different tzinfos representing UTC treated as equal\n ser = Series(date_range("2020", periods=3, tz="UTC"))\n expected = ser.copy()\n ser[1] = NaT\n result = ser.fillna(datetime(2020, 1, 2, tzinfo=timezone.utc))\n tm.assert_series_equal(result, expected)\n\n # pre-2.0 we cast to object with mixed tzs, in 2.0 we retain dtype\n ts = Timestamp("2000-01-01", tz="US/Pacific")\n ser2 = Series(ser._values.tz_convert("dateutil/US/Pacific"))\n assert ser2.dtype.kind == "M"\n result = ser2.fillna(ts)\n expected = Series(\n [ser2[0], 
ts.tz_convert(ser2.dtype.tz), ser2[2]],\n dtype=ser2.dtype,\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "input, input_fillna, expected_data, expected_categories",\n [\n (["A", "B", None, "A"], "B", ["A", "B", "B", "A"], ["A", "B"]),\n (["A", "B", np.nan, "A"], "B", ["A", "B", "B", "A"], ["A", "B"]),\n ],\n )\n def test_fillna_categorical_accept_same_type(\n self, input, input_fillna, expected_data, expected_categories\n ):\n # GH32414\n cat = Categorical(input)\n ser = Series(cat).fillna(input_fillna)\n filled = cat.fillna(ser)\n result = cat.fillna(filled)\n expected = Categorical(expected_data, categories=expected_categories)\n tm.assert_categorical_equal(result, expected)\n\n\n@pytest.mark.filterwarnings(\n "ignore:Series.fillna with 'method' is deprecated:FutureWarning"\n)\nclass TestFillnaPad:\n def test_fillna_bug(self):\n ser = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"])\n filled = ser.fillna(method="ffill")\n expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0], ser.index)\n tm.assert_series_equal(filled, expected)\n\n filled = ser.fillna(method="bfill")\n expected = Series([1.0, 1.0, 3.0, 3.0, np.nan], ser.index)\n tm.assert_series_equal(filled, expected)\n\n def test_ffill(self):\n ts = Series(\n [0.0, 1.0, 2.0, 3.0, 4.0], index=date_range("2020-01-01", periods=5)\n )\n ts.iloc[2] = np.nan\n tm.assert_series_equal(ts.ffill(), ts.fillna(method="ffill"))\n\n def test_ffill_mixed_dtypes_without_missing_data(self):\n # GH#14956\n series = Series([datetime(2015, 1, 1, tzinfo=pytz.utc), 1])\n result = series.ffill()\n tm.assert_series_equal(series, result)\n\n def test_bfill(self):\n ts = Series(\n [0.0, 1.0, 2.0, 3.0, 4.0], index=date_range("2020-01-01", periods=5)\n )\n ts.iloc[2] = np.nan\n tm.assert_series_equal(ts.bfill(), ts.fillna(method="bfill"))\n\n def test_pad_nan(self):\n x = Series(\n [np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"], dtype=float\n )\n\n return_value = 
x.fillna(method="pad", inplace=True)\n assert return_value is None\n\n expected = Series(\n [np.nan, 1.0, 1.0, 3.0, 3.0], ["z", "a", "b", "c", "d"], dtype=float\n )\n tm.assert_series_equal(x[1:], expected[1:])\n assert np.isnan(x.iloc[0]), np.isnan(expected.iloc[0])\n\n def test_series_fillna_limit(self):\n index = np.arange(10)\n s = Series(np.random.default_rng(2).standard_normal(10), index=index)\n\n result = s[:2].reindex(index)\n result = result.fillna(method="pad", limit=5)\n\n expected = s[:2].reindex(index).fillna(method="pad")\n expected[-3:] = np.nan\n tm.assert_series_equal(result, expected)\n\n result = s[-2:].reindex(index)\n result = result.fillna(method="bfill", limit=5)\n\n expected = s[-2:].reindex(index).fillna(method="backfill")\n expected[:3] = np.nan\n tm.assert_series_equal(result, expected)\n\n def test_series_pad_backfill_limit(self):\n index = np.arange(10)\n s = Series(np.random.default_rng(2).standard_normal(10), index=index)\n\n result = s[:2].reindex(index, method="pad", limit=5)\n\n expected = s[:2].reindex(index).fillna(method="pad")\n expected[-3:] = np.nan\n tm.assert_series_equal(result, expected)\n\n result = s[-2:].reindex(index, method="backfill", limit=5)\n\n expected = s[-2:].reindex(index).fillna(method="backfill")\n expected[:3] = np.nan\n tm.assert_series_equal(result, expected)\n\n def test_fillna_int(self):\n ser = Series(np.random.default_rng(2).integers(-100, 100, 50))\n return_value = ser.fillna(method="ffill", inplace=True)\n assert return_value is None\n tm.assert_series_equal(ser.fillna(method="ffill", inplace=False), ser)\n\n def test_datetime64tz_fillna_round_issue(self):\n # GH#14872\n\n data = Series(\n [NaT, NaT, datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc)]\n )\n\n filled = data.bfill()\n\n expected = Series(\n [\n datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc),\n datetime(2016, 12, 12, 22, 24, 6, 100001, tzinfo=pytz.utc),\n datetime(2016, 12, 12, 22, 24, 6, 100001, 
tzinfo=pytz.utc),\n ]\n )\n\n tm.assert_series_equal(filled, expected)\n\n def test_fillna_parr(self):\n # GH-24537\n dti = date_range(\n Timestamp.max - Timedelta(nanoseconds=10), periods=5, freq="ns"\n )\n ser = Series(dti.to_period("ns"))\n ser[2] = NaT\n arr = period_array(\n [\n Timestamp("2262-04-11 23:47:16.854775797"),\n Timestamp("2262-04-11 23:47:16.854775798"),\n Timestamp("2262-04-11 23:47:16.854775798"),\n Timestamp("2262-04-11 23:47:16.854775800"),\n Timestamp("2262-04-11 23:47:16.854775801"),\n ],\n freq="ns",\n )\n expected = Series(arr)\n\n filled = ser.ffill()\n\n tm.assert_series_equal(filled, expected)\n\n @pytest.mark.parametrize("func", ["pad", "backfill"])\n def test_pad_backfill_deprecated(self, func):\n # GH#33396\n ser = Series([1, 2, 3])\n with tm.assert_produces_warning(FutureWarning):\n getattr(ser, func)()\n\n\n@pytest.mark.parametrize(\n "data, expected_data, method, kwargs",\n (\n (\n [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],\n [np.nan, np.nan, 3.0, 3.0, 3.0, 3.0, 7.0, np.nan, np.nan],\n "ffill",\n {"limit_area": "inside"},\n ),\n (\n [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],\n [np.nan, np.nan, 3.0, 3.0, np.nan, np.nan, 7.0, np.nan, np.nan],\n "ffill",\n {"limit_area": "inside", "limit": 1},\n ),\n (\n [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],\n [np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, 7.0],\n "ffill",\n {"limit_area": "outside"},\n ),\n (\n [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],\n [np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan],\n "ffill",\n {"limit_area": "outside", "limit": 1},\n ),\n (\n [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],\n "ffill",\n {"limit_area": "outside", "limit": 1},\n ),\n (\n range(5),\n range(5),\n "ffill",\n {"limit_area": "outside", "limit": 1},\n ),\n (\n [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, 
np.nan],\n [np.nan, np.nan, 3.0, 7.0, 7.0, 7.0, 7.0, np.nan, np.nan],\n "bfill",\n {"limit_area": "inside"},\n ),\n (\n [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],\n [np.nan, np.nan, 3.0, np.nan, np.nan, 7.0, 7.0, np.nan, np.nan],\n "bfill",\n {"limit_area": "inside", "limit": 1},\n ),\n (\n [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],\n [3.0, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan],\n "bfill",\n {"limit_area": "outside"},\n ),\n (\n [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],\n [np.nan, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan],\n "bfill",\n {"limit_area": "outside", "limit": 1},\n ),\n ),\n)\ndef test_ffill_bfill_limit_area(data, expected_data, method, kwargs):\n # GH#56492\n s = Series(data)\n expected = Series(expected_data)\n result = getattr(s, method)(**kwargs)\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_fillna.py | test_fillna.py | Python | 39,985 | 0.95 | 0.048485 | 0.057942 | python-kit | 433 | 2025-03-20T13:44:34.788583 | Apache-2.0 | true | f7514a7d5c5f73a50f5136ee28670557 |
from pandas import (\n Index,\n Series,\n date_range,\n)\nimport pandas._testing as tm\n\n\nclass TestGetNumericData:\n def test_get_numeric_data_preserve_dtype(\n self, using_copy_on_write, warn_copy_on_write\n ):\n # get the numeric data\n obj = Series([1, 2, 3])\n result = obj._get_numeric_data()\n tm.assert_series_equal(result, obj)\n\n # returned object is a shallow copy\n with tm.assert_cow_warning(warn_copy_on_write):\n result.iloc[0] = 0\n if using_copy_on_write:\n assert obj.iloc[0] == 1\n else:\n assert obj.iloc[0] == 0\n\n obj = Series([1, "2", 3.0])\n result = obj._get_numeric_data()\n expected = Series([], dtype=object, index=Index([], dtype=object))\n tm.assert_series_equal(result, expected)\n\n obj = Series([True, False, True])\n result = obj._get_numeric_data()\n tm.assert_series_equal(result, obj)\n\n obj = Series(date_range("20130101", periods=3))\n result = obj._get_numeric_data()\n expected = Series([], dtype="M8[ns]", index=Index([], dtype=object))\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_get_numeric_data.py | test_get_numeric_data.py | Python | 1,178 | 0.95 | 0.078947 | 0.0625 | vue-tools | 941 | 2024-04-24T21:30:10.056646 | BSD-3-Clause | true | 5a180e9e51fd308b0b69f1c868993787 |
import pandas._testing as tm\n\n\ndef test_head_tail(string_series):\n tm.assert_series_equal(string_series.head(), string_series[:5])\n tm.assert_series_equal(string_series.head(0), string_series[0:0])\n tm.assert_series_equal(string_series.tail(), string_series[-5:])\n tm.assert_series_equal(string_series.tail(0), string_series[0:0])\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_head_tail.py | test_head_tail.py | Python | 343 | 0.85 | 0.125 | 0 | node-utils | 473 | 2024-02-22T20:22:40.391034 | MIT | true | 6346d07e056191514aa0cdb0130882ed |
import numpy as np\n\nfrom pandas import (\n Series,\n interval_range,\n)\nimport pandas._testing as tm\n\n\nclass TestInferObjects:\n def test_copy(self, index_or_series):\n # GH#50096\n # case where we don't need to do inference because it is already non-object\n obj = index_or_series(np.array([1, 2, 3], dtype="int64"))\n\n result = obj.infer_objects(copy=False)\n assert tm.shares_memory(result, obj)\n\n # case where we try to do inference but can't do better than object\n obj2 = index_or_series(np.array(["foo", 2], dtype=object))\n result2 = obj2.infer_objects(copy=False)\n assert tm.shares_memory(result2, obj2)\n\n def test_infer_objects_series(self, index_or_series):\n # GH#11221\n actual = index_or_series(np.array([1, 2, 3], dtype="O")).infer_objects()\n expected = index_or_series([1, 2, 3])\n tm.assert_equal(actual, expected)\n\n actual = index_or_series(np.array([1, 2, 3, None], dtype="O")).infer_objects()\n expected = index_or_series([1.0, 2.0, 3.0, np.nan])\n tm.assert_equal(actual, expected)\n\n # only soft conversions, unconvertible pass thru unchanged\n\n obj = index_or_series(np.array([1, 2, 3, None, "a"], dtype="O"))\n actual = obj.infer_objects()\n expected = index_or_series([1, 2, 3, None, "a"], dtype=object)\n\n assert actual.dtype == "object"\n tm.assert_equal(actual, expected)\n\n def test_infer_objects_interval(self, index_or_series):\n # GH#50090\n ii = interval_range(1, 10)\n obj = index_or_series(ii)\n\n result = obj.astype(object).infer_objects()\n tm.assert_equal(result, obj)\n\n def test_infer_objects_bytes(self):\n # GH#49650\n ser = Series([b"a"], dtype="bytes")\n expected = ser.copy()\n result = ser.infer_objects()\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_infer_objects.py | test_infer_objects.py | Python | 1,903 | 0.95 | 0.107143 | 0.162791 | node-utils | 245 | 2024-06-15T05:31:35.793553 | GPL-3.0 | true | 74a83c9cb8a6776132f495a618896434 |
from io import StringIO\nfrom string import ascii_uppercase\nimport textwrap\n\nimport numpy as np\nimport pytest\n\nfrom pandas._config import using_string_dtype\n\nfrom pandas.compat import (\n HAS_PYARROW,\n PYPY,\n)\n\nfrom pandas import (\n CategoricalIndex,\n Index,\n MultiIndex,\n Series,\n date_range,\n)\n\n\ndef test_info_categorical_column_just_works():\n n = 2500\n data = np.array(list("abcdefghij")).take(\n np.random.default_rng(2).integers(0, 10, size=n, dtype=int)\n )\n s = Series(data).astype("category")\n s.isna()\n buf = StringIO()\n s.info(buf=buf)\n\n s2 = s[s == "d"]\n buf = StringIO()\n s2.info(buf=buf)\n\n\ndef test_info_categorical():\n # GH14298\n idx = CategoricalIndex(["a", "b"])\n s = Series(np.zeros(2), index=idx)\n buf = StringIO()\n s.info(buf=buf)\n\n\n@pytest.mark.parametrize("verbose", [True, False])\ndef test_info_series(\n lexsorted_two_level_string_multiindex, verbose, using_infer_string\n):\n index = lexsorted_two_level_string_multiindex\n ser = Series(range(len(index)), index=index, name="sth")\n buf = StringIO()\n ser.info(verbose=verbose, buf=buf)\n result = buf.getvalue()\n\n expected = textwrap.dedent(\n """\\n <class 'pandas.core.series.Series'>\n MultiIndex: 10 entries, ('foo', 'one') to ('qux', 'three')\n """\n )\n if verbose:\n expected += textwrap.dedent(\n """\\n Series name: sth\n Non-Null Count Dtype\n -------------- -----\n 10 non-null int64\n """\n )\n qualifier = "" if using_infer_string and HAS_PYARROW else "+"\n expected += textwrap.dedent(\n f"""\\n dtypes: int64(1)\n memory usage: {ser.memory_usage()}.0{qualifier} bytes\n """\n )\n assert result == expected\n\n\ndef test_info_memory():\n s = Series([1, 2], dtype="i8")\n buf = StringIO()\n s.info(buf=buf)\n result = buf.getvalue()\n memory_bytes = float(s.memory_usage())\n expected = textwrap.dedent(\n f"""\\n <class 'pandas.core.series.Series'>\n RangeIndex: 2 entries, 0 to 1\n Series name: None\n Non-Null Count Dtype\n -------------- -----\n 2 non-null 
int64\n dtypes: int64(1)\n memory usage: {memory_bytes} bytes\n """\n )\n assert result == expected\n\n\ndef test_info_wide():\n s = Series(np.random.default_rng(2).standard_normal(101))\n msg = "Argument `max_cols` can only be passed in DataFrame.info, not Series.info"\n with pytest.raises(ValueError, match=msg):\n s.info(max_cols=1)\n\n\ndef test_info_shows_dtypes():\n dtypes = [\n "int64",\n "float64",\n "datetime64[ns]",\n "timedelta64[ns]",\n "complex128",\n "object",\n "bool",\n ]\n n = 10\n for dtype in dtypes:\n s = Series(np.random.default_rng(2).integers(2, size=n).astype(dtype))\n buf = StringIO()\n s.info(buf=buf)\n res = buf.getvalue()\n name = f"{n:d} non-null {dtype}"\n assert name in res\n\n\n@pytest.mark.xfail(PYPY, reason="on PyPy deep=True doesn't change result")\ndef test_info_memory_usage_deep_not_pypy():\n s_with_object_index = Series({"a": [1]}, index=["foo"])\n assert s_with_object_index.memory_usage(\n index=True, deep=True\n ) > s_with_object_index.memory_usage(index=True)\n\n s_object = Series({"a": ["a"]})\n assert s_object.memory_usage(deep=True) > s_object.memory_usage()\n\n\n@pytest.mark.xfail(not PYPY, reason="on PyPy deep=True does not change result")\ndef test_info_memory_usage_deep_pypy():\n s_with_object_index = Series({"a": [1]}, index=["foo"])\n assert s_with_object_index.memory_usage(\n index=True, deep=True\n ) == s_with_object_index.memory_usage(index=True)\n\n s_object = Series({"a": ["a"]})\n assert s_object.memory_usage(deep=True) == s_object.memory_usage()\n\n\n@pytest.mark.parametrize(\n "index, plus",\n [\n ([1, 2, 3], False),\n (Index(list("ABC"), dtype="str"), not (using_string_dtype() and HAS_PYARROW)),\n (Index(list("ABC"), dtype=object), True),\n (MultiIndex.from_product([range(3), range(3)]), False),\n (\n MultiIndex.from_product([range(3), ["foo", "bar"]]),\n not (using_string_dtype() and HAS_PYARROW),\n ),\n ],\n)\ndef test_info_memory_usage_qualified(index, plus):\n series = Series(1, index=index)\n buf = 
StringIO()\n series.info(buf=buf)\n if plus:\n assert "+" in buf.getvalue()\n else:\n assert "+" not in buf.getvalue()\n\n\ndef test_info_memory_usage_bug_on_multiindex():\n # GH 14308\n # memory usage introspection should not materialize .values\n N = 100\n M = len(ascii_uppercase)\n index = MultiIndex.from_product(\n [list(ascii_uppercase), date_range("20160101", periods=N)],\n names=["id", "date"],\n )\n s = Series(np.random.default_rng(2).standard_normal(N * M), index=index)\n\n unstacked = s.unstack("id")\n assert s.values.nbytes == unstacked.values.nbytes\n assert s.memory_usage(deep=True) > unstacked.memory_usage(deep=True).sum()\n\n # high upper bound\n diff = unstacked.memory_usage(deep=True).sum() - s.memory_usage(deep=True)\n assert diff < 2000\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_info.py | test_info.py | Python | 5,205 | 0.95 | 0.083333 | 0.024691 | python-kit | 203 | 2024-02-25T13:29:38.770458 | GPL-3.0 | true | f85063c74c44d177618526381c623131 |
import numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n Index,\n MultiIndex,\n Series,\n date_range,\n isna,\n)\nimport pandas._testing as tm\n\n\n@pytest.fixture(\n params=[\n "linear",\n "index",\n "values",\n "nearest",\n "slinear",\n "zero",\n "quadratic",\n "cubic",\n "barycentric",\n "krogh",\n "polynomial",\n "spline",\n "piecewise_polynomial",\n "from_derivatives",\n "pchip",\n "akima",\n "cubicspline",\n ]\n)\ndef nontemporal_method(request):\n """Fixture that returns an (method name, required kwargs) pair.\n\n This fixture does not include method 'time' as a parameterization; that\n method requires a Series with a DatetimeIndex, and is generally tested\n separately from these non-temporal methods.\n """\n method = request.param\n kwargs = {"order": 1} if method in ("spline", "polynomial") else {}\n return method, kwargs\n\n\n@pytest.fixture(\n params=[\n "linear",\n "slinear",\n "zero",\n "quadratic",\n "cubic",\n "barycentric",\n "krogh",\n "polynomial",\n "spline",\n "piecewise_polynomial",\n "from_derivatives",\n "pchip",\n "akima",\n "cubicspline",\n ]\n)\ndef interp_methods_ind(request):\n """Fixture that returns a (method name, required kwargs) pair to\n be tested for various Index types.\n\n This fixture does not include methods - 'time', 'index', 'nearest',\n 'values' as a parameterization\n """\n method = request.param\n kwargs = {"order": 1} if method in ("spline", "polynomial") else {}\n return method, kwargs\n\n\nclass TestSeriesInterpolateData:\n @pytest.mark.xfail(reason="EA.fillna does not handle 'linear' method")\n def test_interpolate_period_values(self):\n orig = Series(date_range("2012-01-01", periods=5))\n ser = orig.copy()\n ser[2] = pd.NaT\n\n # period cast\n ser_per = ser.dt.to_period("D")\n res_per = ser_per.interpolate()\n expected_per = orig.dt.to_period("D")\n tm.assert_series_equal(res_per, expected_per)\n\n def test_interpolate(self, datetime_series):\n ts = 
Series(np.arange(len(datetime_series), dtype=float), datetime_series.index)\n\n ts_copy = ts.copy()\n ts_copy[5:10] = np.nan\n\n linear_interp = ts_copy.interpolate(method="linear")\n tm.assert_series_equal(linear_interp, ts)\n\n ord_ts = Series(\n [d.toordinal() for d in datetime_series.index], index=datetime_series.index\n ).astype(float)\n\n ord_ts_copy = ord_ts.copy()\n ord_ts_copy[5:10] = np.nan\n\n time_interp = ord_ts_copy.interpolate(method="time")\n tm.assert_series_equal(time_interp, ord_ts)\n\n def test_interpolate_time_raises_for_non_timeseries(self):\n # When method='time' is used on a non-TimeSeries that contains a null\n # value, a ValueError should be raised.\n non_ts = Series([0, 1, 2, np.nan])\n msg = "time-weighted interpolation only works on Series.* with a DatetimeIndex"\n with pytest.raises(ValueError, match=msg):\n non_ts.interpolate(method="time")\n\n def test_interpolate_cubicspline(self):\n pytest.importorskip("scipy")\n ser = Series([10, 11, 12, 13])\n\n expected = Series(\n [11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],\n index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),\n )\n # interpolate at new_index\n new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(\n float\n )\n result = ser.reindex(new_index).interpolate(method="cubicspline").loc[1:3]\n tm.assert_series_equal(result, expected)\n\n def test_interpolate_pchip(self):\n pytest.importorskip("scipy")\n ser = Series(np.sort(np.random.default_rng(2).uniform(size=100)))\n\n # interpolate at new_index\n new_index = ser.index.union(\n Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])\n ).astype(float)\n interp_s = ser.reindex(new_index).interpolate(method="pchip")\n # does not blow up, GH5977\n interp_s.loc[49:51]\n\n def test_interpolate_akima(self):\n pytest.importorskip("scipy")\n ser = Series([10, 11, 12, 13])\n\n # interpolate at new_index where `der` is zero\n expected = Series(\n [11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 
12.50, 12.75, 13.00],\n index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),\n )\n new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(\n float\n )\n interp_s = ser.reindex(new_index).interpolate(method="akima")\n tm.assert_series_equal(interp_s.loc[1:3], expected)\n\n # interpolate at new_index where `der` is a non-zero int\n expected = Series(\n [11.0, 1.0, 1.0, 1.0, 12.0, 1.0, 1.0, 1.0, 13.0],\n index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),\n )\n new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(\n float\n )\n interp_s = ser.reindex(new_index).interpolate(method="akima", der=1)\n tm.assert_series_equal(interp_s.loc[1:3], expected)\n\n def test_interpolate_piecewise_polynomial(self):\n pytest.importorskip("scipy")\n ser = Series([10, 11, 12, 13])\n\n expected = Series(\n [11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],\n index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),\n )\n # interpolate at new_index\n new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(\n float\n )\n interp_s = ser.reindex(new_index).interpolate(method="piecewise_polynomial")\n tm.assert_series_equal(interp_s.loc[1:3], expected)\n\n def test_interpolate_from_derivatives(self):\n pytest.importorskip("scipy")\n ser = Series([10, 11, 12, 13])\n\n expected = Series(\n [11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],\n index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),\n )\n # interpolate at new_index\n new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(\n float\n )\n interp_s = ser.reindex(new_index).interpolate(method="from_derivatives")\n tm.assert_series_equal(interp_s.loc[1:3], expected)\n\n @pytest.mark.parametrize(\n "kwargs",\n [\n {},\n pytest.param(\n {"method": "polynomial", "order": 1}, marks=td.skip_if_no("scipy")\n ),\n ],\n )\n def test_interpolate_corners(self, kwargs):\n s = Series([np.nan, 
np.nan])\n tm.assert_series_equal(s.interpolate(**kwargs), s)\n\n s = Series([], dtype=object).interpolate()\n tm.assert_series_equal(s.interpolate(**kwargs), s)\n\n def test_interpolate_index_values(self):\n s = Series(np.nan, index=np.sort(np.random.default_rng(2).random(30)))\n s.loc[::3] = np.random.default_rng(2).standard_normal(10)\n\n vals = s.index.values.astype(float)\n\n result = s.interpolate(method="index")\n\n expected = s.copy()\n bad = isna(expected.values)\n good = ~bad\n expected = Series(\n np.interp(vals[bad], vals[good], s.values[good]), index=s.index[bad]\n )\n\n tm.assert_series_equal(result[bad], expected)\n\n # 'values' is synonymous with 'index' for the method kwarg\n other_result = s.interpolate(method="values")\n\n tm.assert_series_equal(other_result, result)\n tm.assert_series_equal(other_result[bad], expected)\n\n def test_interpolate_non_ts(self):\n s = Series([1, 3, np.nan, np.nan, np.nan, 11])\n msg = (\n "time-weighted interpolation only works on Series or DataFrames "\n "with a DatetimeIndex"\n )\n with pytest.raises(ValueError, match=msg):\n s.interpolate(method="time")\n\n @pytest.mark.parametrize(\n "kwargs",\n [\n {},\n pytest.param(\n {"method": "polynomial", "order": 1}, marks=td.skip_if_no("scipy")\n ),\n ],\n )\n def test_nan_interpolate(self, kwargs):\n s = Series([0, 1, np.nan, 3])\n result = s.interpolate(**kwargs)\n expected = Series([0.0, 1.0, 2.0, 3.0])\n tm.assert_series_equal(result, expected)\n\n def test_nan_irregular_index(self):\n s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])\n result = s.interpolate()\n expected = Series([1.0, 2.0, 3.0, 4.0], index=[1, 3, 5, 9])\n tm.assert_series_equal(result, expected)\n\n def test_nan_str_index(self):\n s = Series([0, 1, 2, np.nan], index=list("abcd"))\n result = s.interpolate()\n expected = Series([0.0, 1.0, 2.0, 2.0], index=list("abcd"))\n tm.assert_series_equal(result, expected)\n\n def test_interp_quad(self):\n pytest.importorskip("scipy")\n sq = Series([1, 4, 
np.nan, 16], index=[1, 2, 3, 4])\n result = sq.interpolate(method="quadratic")\n expected = Series([1.0, 4.0, 9.0, 16.0], index=[1, 2, 3, 4])\n tm.assert_series_equal(result, expected)\n\n def test_interp_scipy_basic(self):\n pytest.importorskip("scipy")\n s = Series([1, 3, np.nan, 12, np.nan, 25])\n # slinear\n expected = Series([1.0, 3.0, 7.5, 12.0, 18.5, 25.0])\n result = s.interpolate(method="slinear")\n tm.assert_series_equal(result, expected)\n\n msg = "The 'downcast' keyword in Series.interpolate is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = s.interpolate(method="slinear", downcast="infer")\n tm.assert_series_equal(result, expected)\n # nearest\n expected = Series([1, 3, 3, 12, 12, 25])\n result = s.interpolate(method="nearest")\n tm.assert_series_equal(result, expected.astype("float"))\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = s.interpolate(method="nearest", downcast="infer")\n tm.assert_series_equal(result, expected)\n # zero\n expected = Series([1, 3, 3, 12, 12, 25])\n result = s.interpolate(method="zero")\n tm.assert_series_equal(result, expected.astype("float"))\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = s.interpolate(method="zero", downcast="infer")\n tm.assert_series_equal(result, expected)\n # quadratic\n # GH #15662.\n expected = Series([1, 3.0, 6.823529, 12.0, 18.058824, 25.0])\n result = s.interpolate(method="quadratic")\n tm.assert_series_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = s.interpolate(method="quadratic", downcast="infer")\n tm.assert_series_equal(result, expected)\n # cubic\n expected = Series([1.0, 3.0, 6.8, 12.0, 18.2, 25.0])\n result = s.interpolate(method="cubic")\n tm.assert_series_equal(result, expected)\n\n def test_interp_limit(self):\n s = Series([1, 3, np.nan, np.nan, np.nan, 11])\n\n expected = Series([1.0, 3.0, 5.0, 7.0, np.nan, 11.0])\n result = 
s.interpolate(method="linear", limit=2)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("limit", [-1, 0])\n def test_interpolate_invalid_nonpositive_limit(self, nontemporal_method, limit):\n # GH 9217: make sure limit is greater than zero.\n s = Series([1, 2, np.nan, 4])\n method, kwargs = nontemporal_method\n with pytest.raises(ValueError, match="Limit must be greater than 0"):\n s.interpolate(limit=limit, method=method, **kwargs)\n\n def test_interpolate_invalid_float_limit(self, nontemporal_method):\n # GH 9217: make sure limit is an integer.\n s = Series([1, 2, np.nan, 4])\n method, kwargs = nontemporal_method\n limit = 2.0\n with pytest.raises(ValueError, match="Limit must be an integer"):\n s.interpolate(limit=limit, method=method, **kwargs)\n\n @pytest.mark.parametrize("invalid_method", [None, "nonexistent_method"])\n def test_interp_invalid_method(self, invalid_method):\n s = Series([1, 3, np.nan, 12, np.nan, 25])\n\n msg = f"method must be one of.* Got '{invalid_method}' instead"\n if invalid_method is None:\n msg = "'method' should be a string, not None"\n with pytest.raises(ValueError, match=msg):\n s.interpolate(method=invalid_method)\n\n # When an invalid method and invalid limit (such as -1) are\n # provided, the error message reflects the invalid method.\n with pytest.raises(ValueError, match=msg):\n s.interpolate(method=invalid_method, limit=-1)\n\n def test_interp_invalid_method_and_value(self):\n # GH#36624\n ser = Series([1, 3, np.nan, 12, np.nan, 25])\n\n msg = "'fill_value' is not a valid keyword for Series.interpolate"\n msg2 = "Series.interpolate with method=pad"\n with pytest.raises(ValueError, match=msg):\n with tm.assert_produces_warning(FutureWarning, match=msg2):\n ser.interpolate(fill_value=3, method="pad")\n\n def test_interp_limit_forward(self):\n s = Series([1, 3, np.nan, np.nan, np.nan, 11])\n\n # Provide 'forward' (the default) explicitly here.\n expected = Series([1.0, 3.0, 5.0, 7.0, np.nan, 11.0])\n\n 
result = s.interpolate(method="linear", limit=2, limit_direction="forward")\n tm.assert_series_equal(result, expected)\n\n result = s.interpolate(method="linear", limit=2, limit_direction="FORWARD")\n tm.assert_series_equal(result, expected)\n\n def test_interp_unlimited(self):\n # these test are for issue #16282 default Limit=None is unlimited\n s = Series([np.nan, 1.0, 3.0, np.nan, np.nan, np.nan, 11.0, np.nan])\n expected = Series([1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 11.0])\n result = s.interpolate(method="linear", limit_direction="both")\n tm.assert_series_equal(result, expected)\n\n expected = Series([np.nan, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 11.0])\n result = s.interpolate(method="linear", limit_direction="forward")\n tm.assert_series_equal(result, expected)\n\n expected = Series([1.0, 1.0, 3.0, 5.0, 7.0, 9.0, 11.0, np.nan])\n result = s.interpolate(method="linear", limit_direction="backward")\n tm.assert_series_equal(result, expected)\n\n def test_interp_limit_bad_direction(self):\n s = Series([1, 3, np.nan, np.nan, np.nan, 11])\n\n msg = (\n r"Invalid limit_direction: expecting one of \['forward', "\n r"'backward', 'both'\], got 'abc'"\n )\n with pytest.raises(ValueError, match=msg):\n s.interpolate(method="linear", limit=2, limit_direction="abc")\n\n # raises an error even if no limit is specified.\n with pytest.raises(ValueError, match=msg):\n s.interpolate(method="linear", limit_direction="abc")\n\n # limit_area introduced GH #16284\n def test_interp_limit_area(self):\n # These tests are for issue #9218 -- fill NaNs in both directions.\n s = Series([np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan])\n\n expected = Series([np.nan, np.nan, 3.0, 4.0, 5.0, 6.0, 7.0, np.nan, np.nan])\n result = s.interpolate(method="linear", limit_area="inside")\n tm.assert_series_equal(result, expected)\n\n expected = Series(\n [np.nan, np.nan, 3.0, 4.0, np.nan, np.nan, 7.0, np.nan, np.nan]\n )\n result = s.interpolate(method="linear", limit_area="inside", 
limit=1)\n tm.assert_series_equal(result, expected)\n\n expected = Series([np.nan, np.nan, 3.0, 4.0, np.nan, 6.0, 7.0, np.nan, np.nan])\n result = s.interpolate(\n method="linear", limit_area="inside", limit_direction="both", limit=1\n )\n tm.assert_series_equal(result, expected)\n\n expected = Series([np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, 7.0])\n result = s.interpolate(method="linear", limit_area="outside")\n tm.assert_series_equal(result, expected)\n\n expected = Series(\n [np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan]\n )\n result = s.interpolate(method="linear", limit_area="outside", limit=1)\n tm.assert_series_equal(result, expected)\n\n expected = Series([np.nan, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan])\n result = s.interpolate(\n method="linear", limit_area="outside", limit_direction="both", limit=1\n )\n tm.assert_series_equal(result, expected)\n\n expected = Series([3.0, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan])\n result = s.interpolate(\n method="linear", limit_area="outside", limit_direction="backward"\n )\n tm.assert_series_equal(result, expected)\n\n # raises an error even if limit type is wrong.\n msg = r"Invalid limit_area: expecting one of \['inside', 'outside'\], got abc"\n with pytest.raises(ValueError, match=msg):\n s.interpolate(method="linear", limit_area="abc")\n\n @pytest.mark.parametrize(\n "method, limit_direction, expected",\n [\n ("pad", "backward", "forward"),\n ("ffill", "backward", "forward"),\n ("backfill", "forward", "backward"),\n ("bfill", "forward", "backward"),\n ("pad", "both", "forward"),\n ("ffill", "both", "forward"),\n ("backfill", "both", "backward"),\n ("bfill", "both", "backward"),\n ],\n )\n def test_interp_limit_direction_raises(self, method, limit_direction, expected):\n # https://github.com/pandas-dev/pandas/pull/34746\n s = Series([1, 2, 3])\n\n msg = f"`limit_direction` must be '{expected}' for method `{method}`"\n msg2 = "Series.interpolate with method="\n 
with pytest.raises(ValueError, match=msg):\n with tm.assert_produces_warning(FutureWarning, match=msg2):\n s.interpolate(method=method, limit_direction=limit_direction)\n\n @pytest.mark.parametrize(\n "data, expected_data, kwargs",\n (\n (\n [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],\n [np.nan, np.nan, 3.0, 3.0, 3.0, 3.0, 7.0, np.nan, np.nan],\n {"method": "pad", "limit_area": "inside"},\n ),\n (\n [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],\n [np.nan, np.nan, 3.0, 3.0, np.nan, np.nan, 7.0, np.nan, np.nan],\n {"method": "pad", "limit_area": "inside", "limit": 1},\n ),\n (\n [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],\n [np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, 7.0],\n {"method": "pad", "limit_area": "outside"},\n ),\n (\n [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],\n [np.nan, np.nan, 3.0, np.nan, np.nan, np.nan, 7.0, 7.0, np.nan],\n {"method": "pad", "limit_area": "outside", "limit": 1},\n ),\n (\n [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],\n {"method": "pad", "limit_area": "outside", "limit": 1},\n ),\n (\n range(5),\n range(5),\n {"method": "pad", "limit_area": "outside", "limit": 1},\n ),\n ),\n )\n def test_interp_limit_area_with_pad(self, data, expected_data, kwargs):\n # GH26796\n\n s = Series(data)\n expected = Series(expected_data)\n msg = "Series.interpolate with method=pad"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = s.interpolate(**kwargs)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "data, expected_data, kwargs",\n (\n (\n [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],\n [np.nan, np.nan, 3.0, 7.0, 7.0, 7.0, 7.0, np.nan, np.nan],\n {"method": "bfill", "limit_area": "inside"},\n ),\n (\n [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],\n [np.nan, np.nan, 3.0, np.nan, np.nan, 7.0, 7.0, np.nan, 
np.nan],\n {"method": "bfill", "limit_area": "inside", "limit": 1},\n ),\n (\n [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],\n [3.0, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan],\n {"method": "bfill", "limit_area": "outside"},\n ),\n (\n [np.nan, np.nan, 3, np.nan, np.nan, np.nan, 7, np.nan, np.nan],\n [np.nan, 3.0, 3.0, np.nan, np.nan, np.nan, 7.0, np.nan, np.nan],\n {"method": "bfill", "limit_area": "outside", "limit": 1},\n ),\n ),\n )\n def test_interp_limit_area_with_backfill(self, data, expected_data, kwargs):\n # GH26796\n\n s = Series(data)\n expected = Series(expected_data)\n msg = "Series.interpolate with method=bfill"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = s.interpolate(**kwargs)\n tm.assert_series_equal(result, expected)\n\n def test_interp_limit_direction(self):\n # These tests are for issue #9218 -- fill NaNs in both directions.\n s = Series([1, 3, np.nan, np.nan, np.nan, 11])\n\n expected = Series([1.0, 3.0, np.nan, 7.0, 9.0, 11.0])\n result = s.interpolate(method="linear", limit=2, limit_direction="backward")\n tm.assert_series_equal(result, expected)\n\n expected = Series([1.0, 3.0, 5.0, np.nan, 9.0, 11.0])\n result = s.interpolate(method="linear", limit=1, limit_direction="both")\n tm.assert_series_equal(result, expected)\n\n # Check that this works on a longer series of nans.\n s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12, np.nan])\n\n expected = Series([1.0, 3.0, 4.0, 5.0, 6.0, 7.0, 9.0, 10.0, 11.0, 12.0, 12.0])\n result = s.interpolate(method="linear", limit=2, limit_direction="both")\n tm.assert_series_equal(result, expected)\n\n expected = Series(\n [1.0, 3.0, 4.0, np.nan, 6.0, 7.0, 9.0, 10.0, 11.0, 12.0, 12.0]\n )\n result = s.interpolate(method="linear", limit=1, limit_direction="both")\n tm.assert_series_equal(result, expected)\n\n def test_interp_limit_to_ends(self):\n # These test are for issue #10420 -- flow back to beginning.\n s = Series([np.nan, 
np.nan, 5, 7, 9, np.nan])\n\n expected = Series([5.0, 5.0, 5.0, 7.0, 9.0, np.nan])\n result = s.interpolate(method="linear", limit=2, limit_direction="backward")\n tm.assert_series_equal(result, expected)\n\n expected = Series([5.0, 5.0, 5.0, 7.0, 9.0, 9.0])\n result = s.interpolate(method="linear", limit=2, limit_direction="both")\n tm.assert_series_equal(result, expected)\n\n def test_interp_limit_before_ends(self):\n # These test are for issue #11115 -- limit ends properly.\n s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])\n\n expected = Series([np.nan, np.nan, 5.0, 7.0, 7.0, np.nan])\n result = s.interpolate(method="linear", limit=1, limit_direction="forward")\n tm.assert_series_equal(result, expected)\n\n expected = Series([np.nan, 5.0, 5.0, 7.0, np.nan, np.nan])\n result = s.interpolate(method="linear", limit=1, limit_direction="backward")\n tm.assert_series_equal(result, expected)\n\n expected = Series([np.nan, 5.0, 5.0, 7.0, 7.0, np.nan])\n result = s.interpolate(method="linear", limit=1, limit_direction="both")\n tm.assert_series_equal(result, expected)\n\n def test_interp_all_good(self):\n pytest.importorskip("scipy")\n s = Series([1, 2, 3])\n result = s.interpolate(method="polynomial", order=1)\n tm.assert_series_equal(result, s)\n\n # non-scipy\n result = s.interpolate()\n tm.assert_series_equal(result, s)\n\n @pytest.mark.parametrize(\n "check_scipy", [False, pytest.param(True, marks=td.skip_if_no("scipy"))]\n )\n def test_interp_multiIndex(self, check_scipy):\n idx = MultiIndex.from_tuples([(0, "a"), (1, "b"), (2, "c")])\n s = Series([1, 2, np.nan], index=idx)\n\n expected = s.copy()\n expected.loc[2] = 2\n result = s.interpolate()\n tm.assert_series_equal(result, expected)\n\n msg = "Only `method=linear` interpolation is supported on MultiIndexes"\n if check_scipy:\n with pytest.raises(ValueError, match=msg):\n s.interpolate(method="polynomial", order=1)\n\n def test_interp_nonmono_raise(self):\n pytest.importorskip("scipy")\n s = Series([1, 
np.nan, 3], index=[0, 2, 1])\n msg = "krogh interpolation requires that the index be monotonic"\n with pytest.raises(ValueError, match=msg):\n s.interpolate(method="krogh")\n\n @pytest.mark.parametrize("method", ["nearest", "pad"])\n def test_interp_datetime64(self, method, tz_naive_fixture):\n pytest.importorskip("scipy")\n df = Series(\n [1, np.nan, 3], index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture)\n )\n warn = None if method == "nearest" else FutureWarning\n msg = "Series.interpolate with method=pad is deprecated"\n with tm.assert_produces_warning(warn, match=msg):\n result = df.interpolate(method=method)\n if warn is not None:\n # check the "use ffill instead" is equivalent\n alt = df.ffill()\n tm.assert_series_equal(result, alt)\n\n expected = Series(\n [1.0, 1.0, 3.0],\n index=date_range("1/1/2000", periods=3, tz=tz_naive_fixture),\n )\n tm.assert_series_equal(result, expected)\n\n def test_interp_pad_datetime64tz_values(self):\n # GH#27628 missing.interpolate_2d should handle datetimetz values\n dti = date_range("2015-04-05", periods=3, tz="US/Central")\n ser = Series(dti)\n ser[1] = pd.NaT\n\n msg = "Series.interpolate with method=pad is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = ser.interpolate(method="pad")\n # check the "use ffill instead" is equivalent\n alt = ser.ffill()\n tm.assert_series_equal(result, alt)\n\n expected = Series(dti)\n expected[1] = expected[0]\n tm.assert_series_equal(result, expected)\n\n def test_interp_limit_no_nans(self):\n # GH 7173\n s = Series([1.0, 2.0, 3.0])\n result = s.interpolate(limit=1)\n expected = s\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("method", ["polynomial", "spline"])\n def test_no_order(self, method):\n # see GH-10633, GH-24014\n pytest.importorskip("scipy")\n s = Series([0, 1, np.nan, 3])\n msg = "You must specify the order of the spline or polynomial"\n with pytest.raises(ValueError, match=msg):\n 
s.interpolate(method=method)\n\n @pytest.mark.parametrize("order", [-1, -1.0, 0, 0.0, np.nan])\n def test_interpolate_spline_invalid_order(self, order):\n pytest.importorskip("scipy")\n s = Series([0, 1, np.nan, 3])\n msg = "order needs to be specified and greater than 0"\n with pytest.raises(ValueError, match=msg):\n s.interpolate(method="spline", order=order)\n\n def test_spline(self):\n pytest.importorskip("scipy")\n s = Series([1, 2, np.nan, 4, 5, np.nan, 7])\n result = s.interpolate(method="spline", order=1)\n expected = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])\n tm.assert_series_equal(result, expected)\n\n def test_spline_extrapolate(self):\n pytest.importorskip("scipy")\n s = Series([1, 2, 3, 4, np.nan, 6, np.nan])\n result3 = s.interpolate(method="spline", order=1, ext=3)\n expected3 = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 6.0])\n tm.assert_series_equal(result3, expected3)\n\n result1 = s.interpolate(method="spline", order=1, ext=0)\n expected1 = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])\n tm.assert_series_equal(result1, expected1)\n\n def test_spline_smooth(self):\n pytest.importorskip("scipy")\n s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])\n assert (\n s.interpolate(method="spline", order=3, s=0)[5]\n != s.interpolate(method="spline", order=3)[5]\n )\n\n def test_spline_interpolation(self):\n # Explicit cast to float to avoid implicit cast when setting np.nan\n pytest.importorskip("scipy")\n s = Series(np.arange(10) ** 2, dtype="float")\n s[np.random.default_rng(2).integers(0, 9, 3)] = np.nan\n result1 = s.interpolate(method="spline", order=1)\n expected1 = s.interpolate(method="spline", order=1)\n tm.assert_series_equal(result1, expected1)\n\n def test_interp_timedelta64(self):\n # GH 6424\n df = Series([1, np.nan, 3], index=pd.to_timedelta([1, 2, 3]))\n result = df.interpolate(method="time")\n expected = Series([1.0, 2.0, 3.0], index=pd.to_timedelta([1, 2, 3]))\n tm.assert_series_equal(result, expected)\n\n # test for non uniform spacing\n df = 
Series([1, np.nan, 3], index=pd.to_timedelta([1, 2, 4]))\n result = df.interpolate(method="time")\n expected = Series([1.0, 1.666667, 3.0], index=pd.to_timedelta([1, 2, 4]))\n tm.assert_series_equal(result, expected)\n\n def test_series_interpolate_method_values(self):\n # GH#1646\n rng = date_range("1/1/2000", "1/20/2000", freq="D")\n ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng)\n\n ts[::2] = np.nan\n\n result = ts.interpolate(method="values")\n exp = ts.interpolate()\n tm.assert_series_equal(result, exp)\n\n def test_series_interpolate_intraday(self):\n # #1698\n index = date_range("1/1/2012", periods=4, freq="12D")\n ts = Series([0, 12, 24, 36], index)\n new_index = index.append(index + pd.DateOffset(days=1)).sort_values()\n\n exp = ts.reindex(new_index).interpolate(method="time")\n\n index = date_range("1/1/2012", periods=4, freq="12h")\n ts = Series([0, 12, 24, 36], index)\n new_index = index.append(index + pd.DateOffset(hours=1)).sort_values()\n result = ts.reindex(new_index).interpolate(method="time")\n\n tm.assert_numpy_array_equal(result.values, exp.values)\n\n @pytest.mark.parametrize(\n "ind",\n [\n ["a", "b", "c", "d"],\n pd.period_range(start="2019-01-01", periods=4),\n pd.interval_range(start=0, end=4),\n ],\n )\n def test_interp_non_timedelta_index(self, interp_methods_ind, ind):\n # gh 21662\n df = pd.DataFrame([0, 1, np.nan, 3], index=ind)\n\n method, kwargs = interp_methods_ind\n if method == "pchip":\n pytest.importorskip("scipy")\n\n if method == "linear":\n result = df[0].interpolate(**kwargs)\n expected = Series([0.0, 1.0, 2.0, 3.0], name=0, index=ind)\n tm.assert_series_equal(result, expected)\n else:\n expected_error = (\n "Index column must be numeric or datetime type when "\n f"using {method} method other than linear. 
"\n "Try setting a numeric or datetime index column before "\n "interpolating."\n )\n with pytest.raises(ValueError, match=expected_error):\n df[0].interpolate(method=method, **kwargs)\n\n def test_interpolate_timedelta_index(self, request, interp_methods_ind):\n """\n Tests for non numerical index types - object, period, timedelta\n Note that all methods except time, index, nearest and values\n are tested here.\n """\n # gh 21662\n pytest.importorskip("scipy")\n ind = pd.timedelta_range(start=1, periods=4)\n df = pd.DataFrame([0, 1, np.nan, 3], index=ind)\n\n method, kwargs = interp_methods_ind\n\n if method in {"cubic", "zero"}:\n request.applymarker(\n pytest.mark.xfail(\n reason=f"{method} interpolation is not supported for TimedeltaIndex"\n )\n )\n result = df[0].interpolate(method=method, **kwargs)\n expected = Series([0.0, 1.0, 2.0, 3.0], name=0, index=ind)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "ascending, expected_values",\n [(True, [1, 2, 3, 9, 10]), (False, [10, 9, 3, 2, 1])],\n )\n def test_interpolate_unsorted_index(self, ascending, expected_values):\n # GH 21037\n ts = Series(data=[10, 9, np.nan, 2, 1], index=[10, 9, 3, 2, 1])\n result = ts.sort_index(ascending=ascending).interpolate(method="index")\n expected = Series(data=expected_values, index=expected_values, dtype=float)\n tm.assert_series_equal(result, expected)\n\n def test_interpolate_asfreq_raises(self):\n ser = Series(["a", None, "b"], dtype=object)\n msg2 = "Series.interpolate with object dtype"\n msg = "Invalid fill method"\n with pytest.raises(ValueError, match=msg):\n with tm.assert_produces_warning(FutureWarning, match=msg2):\n ser.interpolate(method="asfreq")\n\n def test_interpolate_fill_value(self):\n # GH#54920\n pytest.importorskip("scipy")\n ser = Series([np.nan, 0, 1, np.nan, 3, np.nan])\n result = ser.interpolate(method="nearest", fill_value=0)\n expected = Series([np.nan, 0, 1, 1, 3, 0])\n tm.assert_series_equal(result, expected)\n | 
.venv\Lib\site-packages\pandas\tests\series\methods\test_interpolate.py | test_interpolate.py | Python | 34,267 | 0.95 | 0.089862 | 0.067935 | react-lib | 681 | 2024-08-31T04:34:32.904035 | MIT | true | f1a40de9fb3163a2cf6633fd850ca1d8 |
import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n Series,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.core import algorithms\nfrom pandas.core.arrays import PeriodArray\n\n\nclass TestSeriesIsIn:\n def test_isin(self):\n s = Series(["A", "B", "C", "a", "B", "B", "A", "C"])\n\n result = s.isin(["A", "C"])\n expected = Series([True, False, True, False, False, False, True, True])\n tm.assert_series_equal(result, expected)\n\n # GH#16012\n # This specific issue has to have a series over 1e6 in len, but the\n # comparison array (in_list) must be large enough so that numpy doesn't\n # do a manual masking trick that will avoid this issue altogether\n s = Series(list("abcdefghijk" * 10**5))\n # If numpy doesn't do the manual comparison/mask, these\n # unorderable mixed types are what cause the exception in numpy\n in_list = [-1, "a", "b", "G", "Y", "Z", "E", "K", "E", "S", "I", "R", "R"] * 6\n\n assert s.isin(in_list).sum() == 200000\n\n def test_isin_with_string_scalar(self):\n # GH#4763\n s = Series(["A", "B", "C", "a", "B", "B", "A", "C"])\n msg = (\n r"only list-like objects are allowed to be passed to isin\(\), "\n r"you passed a `str`"\n )\n with pytest.raises(TypeError, match=msg):\n s.isin("a")\n\n s = Series(["aaa", "b", "c"])\n with pytest.raises(TypeError, match=msg):\n s.isin("aaa")\n\n def test_isin_datetimelike_mismatched_reso(self):\n expected = Series([True, True, False, False, False])\n\n ser = Series(date_range("jan-01-2013", "jan-05-2013"))\n\n # fails on dtype conversion in the first place\n day_values = np.asarray(ser[0:2].values).astype("datetime64[D]")\n result = ser.isin(day_values)\n tm.assert_series_equal(result, expected)\n\n dta = ser[:2]._values.astype("M8[s]")\n result = ser.isin(dta)\n tm.assert_series_equal(result, expected)\n\n def test_isin_datetimelike_mismatched_reso_list(self):\n expected = Series([True, True, False, False, False])\n\n ser = Series(date_range("jan-01-2013", 
"jan-05-2013"))\n\n dta = ser[:2]._values.astype("M8[s]")\n result = ser.isin(list(dta))\n tm.assert_series_equal(result, expected)\n\n def test_isin_with_i8(self):\n # GH#5021\n\n expected = Series([True, True, False, False, False])\n expected2 = Series([False, True, False, False, False])\n\n # datetime64[ns]\n s = Series(date_range("jan-01-2013", "jan-05-2013"))\n\n result = s.isin(s[0:2])\n tm.assert_series_equal(result, expected)\n\n result = s.isin(s[0:2].values)\n tm.assert_series_equal(result, expected)\n\n result = s.isin([s[1]])\n tm.assert_series_equal(result, expected2)\n\n result = s.isin([np.datetime64(s[1])])\n tm.assert_series_equal(result, expected2)\n\n result = s.isin(set(s[0:2]))\n tm.assert_series_equal(result, expected)\n\n # timedelta64[ns]\n s = Series(pd.to_timedelta(range(5), unit="d"))\n result = s.isin(s[0:2])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])\n def test_isin_empty(self, empty):\n # see GH#16991\n s = Series(["a", "b"])\n expected = Series([False, False])\n\n result = s.isin(empty)\n tm.assert_series_equal(expected, result)\n\n def test_isin_read_only(self):\n # https://github.com/pandas-dev/pandas/issues/37174\n arr = np.array([1, 2, 3])\n arr.setflags(write=False)\n s = Series([1, 2, 3])\n result = s.isin(arr)\n expected = Series([True, True, True])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("dtype", [object, None])\n def test_isin_dt64_values_vs_ints(self, dtype):\n # GH#36621 dont cast integers to datetimes for isin\n dti = date_range("2013-01-01", "2013-01-05")\n ser = Series(dti)\n\n comps = np.asarray([1356998400000000000], dtype=dtype)\n\n res = dti.isin(comps)\n expected = np.array([False] * len(dti), dtype=bool)\n tm.assert_numpy_array_equal(res, expected)\n\n res = ser.isin(comps)\n tm.assert_series_equal(res, Series(expected))\n\n res = pd.core.algorithms.isin(ser, comps)\n tm.assert_numpy_array_equal(res, 
expected)\n\n def test_isin_tzawareness_mismatch(self):\n dti = date_range("2013-01-01", "2013-01-05")\n ser = Series(dti)\n\n other = dti.tz_localize("UTC")\n\n res = dti.isin(other)\n expected = np.array([False] * len(dti), dtype=bool)\n tm.assert_numpy_array_equal(res, expected)\n\n res = ser.isin(other)\n tm.assert_series_equal(res, Series(expected))\n\n res = pd.core.algorithms.isin(ser, other)\n tm.assert_numpy_array_equal(res, expected)\n\n def test_isin_period_freq_mismatch(self):\n dti = date_range("2013-01-01", "2013-01-05")\n pi = dti.to_period("M")\n ser = Series(pi)\n\n # We construct another PeriodIndex with the same i8 values\n # but different dtype\n dtype = dti.to_period("Y").dtype\n other = PeriodArray._simple_new(pi.asi8, dtype=dtype)\n\n res = pi.isin(other)\n expected = np.array([False] * len(pi), dtype=bool)\n tm.assert_numpy_array_equal(res, expected)\n\n res = ser.isin(other)\n tm.assert_series_equal(res, Series(expected))\n\n res = pd.core.algorithms.isin(ser, other)\n tm.assert_numpy_array_equal(res, expected)\n\n @pytest.mark.parametrize("values", [[-9.0, 0.0], [-9, 0]])\n def test_isin_float_in_int_series(self, values):\n # GH#19356 GH#21804\n ser = Series(values)\n result = ser.isin([-9, -0.5])\n expected = Series([True, False])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("dtype", ["boolean", "Int64", "Float64"])\n @pytest.mark.parametrize(\n "data,values,expected",\n [\n ([0, 1, 0], [1], [False, True, False]),\n ([0, 1, 0], [1, pd.NA], [False, True, False]),\n ([0, pd.NA, 0], [1, 0], [True, False, True]),\n ([0, 1, pd.NA], [1, pd.NA], [False, True, True]),\n ([0, 1, pd.NA], [1, np.nan], [False, True, False]),\n ([0, pd.NA, pd.NA], [np.nan, pd.NaT, None], [False, False, False]),\n ],\n )\n def test_isin_masked_types(self, dtype, data, values, expected):\n # GH#42405\n ser = Series(data, dtype=dtype)\n\n result = ser.isin(values)\n expected = Series(expected, dtype="boolean")\n\n tm.assert_series_equal(result, 
expected)\n\n\ndef test_isin_large_series_mixed_dtypes_and_nan(monkeypatch):\n # https://github.com/pandas-dev/pandas/issues/37094\n # combination of object dtype for the values\n # and > _MINIMUM_COMP_ARR_LEN elements\n min_isin_comp = 5\n ser = Series([1, 2, np.nan] * min_isin_comp)\n with monkeypatch.context() as m:\n m.setattr(algorithms, "_MINIMUM_COMP_ARR_LEN", min_isin_comp)\n result = ser.isin({"foo", "bar"})\n expected = Series([False] * 3 * min_isin_comp)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "array,expected",\n [\n (\n [0, 1j, 1j, 1, 1 + 1j, 1 + 2j, 1 + 1j],\n Series([False, True, True, False, True, True, True], dtype=bool),\n )\n ],\n)\ndef test_isin_complex_numbers(array, expected):\n # GH 17927\n result = Series(array).isin([1j, 1 + 1j, 1 + 2j])\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data,is_in",\n [([1, [2]], [1]), (["simple str", [{"values": 3}]], ["simple str"])],\n)\ndef test_isin_filtering_with_mixed_object_types(data, is_in):\n # GH 20883\n\n ser = Series(data)\n result = ser.isin(is_in)\n expected = Series([True, False])\n\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("data", [[1, 2, 3], [1.0, 2.0, 3.0]])\n@pytest.mark.parametrize("isin", [[1, 2], [1.0, 2.0]])\ndef test_isin_filtering_on_iterable(data, isin):\n # GH 50234\n\n ser = Series(data)\n result = ser.isin(i for i in isin)\n expected_result = Series([True, True, False])\n\n tm.assert_series_equal(result, expected_result)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_isin.py | test_isin.py | Python | 8,343 | 0.95 | 0.079365 | 0.123711 | python-kit | 629 | 2025-03-28T22:35:53.980057 | MIT | true | 753726a67e7e77b1751be77e9286850c |
"""\nWe also test Series.notna in this file.\n"""\nimport numpy as np\n\nfrom pandas import (\n Period,\n Series,\n)\nimport pandas._testing as tm\n\n\nclass TestIsna:\n def test_isna_period_dtype(self):\n # GH#13737\n ser = Series([Period("2011-01", freq="M"), Period("NaT", freq="M")])\n\n expected = Series([False, True])\n\n result = ser.isna()\n tm.assert_series_equal(result, expected)\n\n result = ser.notna()\n tm.assert_series_equal(result, ~expected)\n\n def test_isna(self):\n ser = Series([0, 5.4, 3, np.nan, -0.001])\n expected = Series([False, False, False, True, False])\n tm.assert_series_equal(ser.isna(), expected)\n tm.assert_series_equal(ser.notna(), ~expected)\n\n ser = Series(["hi", "", np.nan])\n expected = Series([False, False, True])\n tm.assert_series_equal(ser.isna(), expected)\n tm.assert_series_equal(ser.notna(), ~expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_isna.py | test_isna.py | Python | 940 | 0.95 | 0.085714 | 0.037037 | node-utils | 23 | 2024-04-19T13:26:44.421822 | GPL-3.0 | true | f8a6f9fbc6ff4fddc9861a2da8dfb8cf |
import numpy as np\n\nfrom pandas import (\n Series,\n date_range,\n)\n\n\nclass TestIsMonotonic:\n def test_is_monotonic_numeric(self):\n ser = Series(np.random.default_rng(2).integers(0, 10, size=1000))\n assert not ser.is_monotonic_increasing\n ser = Series(np.arange(1000))\n assert ser.is_monotonic_increasing is True\n assert ser.is_monotonic_increasing is True\n ser = Series(np.arange(1000, 0, -1))\n assert ser.is_monotonic_decreasing is True\n\n def test_is_monotonic_dt64(self):\n ser = Series(date_range("20130101", periods=10))\n assert ser.is_monotonic_increasing is True\n assert ser.is_monotonic_increasing is True\n\n ser = Series(list(reversed(ser)))\n assert ser.is_monotonic_increasing is False\n assert ser.is_monotonic_decreasing is True\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_is_monotonic.py | test_is_monotonic.py | Python | 838 | 0.85 | 0.115385 | 0 | awesome-app | 294 | 2025-06-24T10:04:50.883109 | GPL-3.0 | true | a70e6961a31268adabf12dda34f35482 |
import numpy as np\nimport pytest\n\nfrom pandas import Series\n\n\n@pytest.mark.parametrize(\n "data, expected",\n [\n (np.random.default_rng(2).integers(0, 10, size=1000), False),\n (np.arange(1000), True),\n ([], True),\n ([np.nan], True),\n (["foo", "bar", np.nan], True),\n (["foo", "foo", np.nan], False),\n (["foo", "bar", np.nan, np.nan], False),\n ],\n)\ndef test_is_unique(data, expected):\n # GH#11946 / GH#25180\n ser = Series(data)\n assert ser.is_unique is expected\n\n\ndef test_is_unique_class_ne(capsys):\n # GH#20661\n class Foo:\n def __init__(self, val) -> None:\n self._value = val\n\n def __ne__(self, other):\n raise Exception("NEQ not supported")\n\n with capsys.disabled():\n li = [Foo(i) for i in range(5)]\n ser = Series(li, index=list(range(5)))\n\n ser.is_unique\n captured = capsys.readouterr()\n assert len(captured.err) == 0\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_is_unique.py | test_is_unique.py | Python | 953 | 0.95 | 0.15 | 0.0625 | react-lib | 552 | 2024-03-19T16:17:09.556062 | MIT | true | 5841bd0ead8556cfc14ff83c66feefe7 |
"""\nSeries.item method, mainly testing that we get python scalars as opposed to\nnumpy scalars.\n"""\nimport pytest\n\nfrom pandas import (\n Series,\n Timedelta,\n Timestamp,\n date_range,\n)\n\n\nclass TestItem:\n def test_item(self):\n # We are testing that we get python scalars as opposed to numpy scalars\n ser = Series([1])\n result = ser.item()\n assert result == 1\n assert result == ser.iloc[0]\n assert isinstance(result, int) # i.e. not np.int64\n\n ser = Series([0.5], index=[3])\n result = ser.item()\n assert isinstance(result, float)\n assert result == 0.5\n\n ser = Series([1, 2])\n msg = "can only convert an array of size 1"\n with pytest.raises(ValueError, match=msg):\n ser.item()\n\n dti = date_range("2016-01-01", periods=2)\n with pytest.raises(ValueError, match=msg):\n dti.item()\n with pytest.raises(ValueError, match=msg):\n Series(dti).item()\n\n val = dti[:1].item()\n assert isinstance(val, Timestamp)\n val = Series(dti)[:1].item()\n assert isinstance(val, Timestamp)\n\n tdi = dti - dti\n with pytest.raises(ValueError, match=msg):\n tdi.item()\n with pytest.raises(ValueError, match=msg):\n Series(tdi).item()\n\n val = tdi[:1].item()\n assert isinstance(val, Timedelta)\n val = Series(tdi)[:1].item()\n assert isinstance(val, Timedelta)\n\n # Case where ser[0] would not work\n ser = Series(dti, index=[5, 6])\n val = ser.iloc[:1].item()\n assert val == dti[0]\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_item.py | test_item.py | Python | 1,627 | 0.95 | 0.033898 | 0.040816 | node-utils | 907 | 2023-07-24T07:50:41.481522 | BSD-3-Clause | true | 9935752e3cf7aab344244a05a69c8791 |
from collections import (\n Counter,\n defaultdict,\n)\nfrom decimal import Decimal\nimport math\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n bdate_range,\n date_range,\n isna,\n timedelta_range,\n)\nimport pandas._testing as tm\n\n\ndef test_series_map_box_timedelta():\n # GH#11349\n ser = Series(timedelta_range("1 day 1 s", periods=5, freq="h"))\n\n def f(x):\n return x.total_seconds()\n\n ser.map(f)\n\n\ndef test_map_callable(datetime_series):\n with np.errstate(all="ignore"):\n tm.assert_series_equal(datetime_series.map(np.sqrt), np.sqrt(datetime_series))\n\n # map function element-wise\n tm.assert_series_equal(datetime_series.map(math.exp), np.exp(datetime_series))\n\n # empty series\n s = Series(dtype=object, name="foo", index=Index([], name="bar"))\n rs = s.map(lambda x: x)\n tm.assert_series_equal(s, rs)\n\n # check all metadata (GH 9322)\n assert s is not rs\n assert s.index is rs.index\n assert s.dtype == rs.dtype\n assert s.name == rs.name\n\n # index but no data\n s = Series(index=[1, 2, 3], dtype=np.float64)\n rs = s.map(lambda x: x)\n tm.assert_series_equal(s, rs)\n\n\ndef test_map_same_length_inference_bug():\n s = Series([1, 2])\n\n def f(x):\n return (x, x + 1)\n\n s = Series([1, 2, 3])\n result = s.map(f)\n expected = Series([(1, 2), (2, 3), (3, 4)])\n tm.assert_series_equal(result, expected)\n\n s = Series(["foo,bar"])\n result = s.map(lambda x: x.split(","))\n expected = Series([("foo", "bar")])\n tm.assert_series_equal(result, expected)\n\n\ndef test_series_map_box_timestamps():\n # GH#2689, GH#2627\n ser = Series(date_range("1/1/2000", periods=3))\n\n def func(x):\n return (x.hour, x.day, x.month)\n\n result = ser.map(func)\n expected = Series([(0, 1, 1), (0, 2, 1), (0, 3, 1)])\n tm.assert_series_equal(result, expected)\n\n\ndef test_map_series_stringdtype(any_string_dtype, using_infer_string):\n # map test on StringDType, GH#40823\n ser1 = Series(\n 
data=["cat", "dog", "rabbit"],\n index=["id1", "id2", "id3"],\n dtype=any_string_dtype,\n )\n ser2 = Series(["id3", "id2", "id1", "id7000"], dtype=any_string_dtype)\n result = ser2.map(ser1)\n\n item = pd.NA\n if ser2.dtype == object:\n item = np.nan\n\n expected = Series(data=["rabbit", "dog", "cat", item], dtype=any_string_dtype)\n if using_infer_string and any_string_dtype == "object":\n expected = expected.astype("str")\n\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data, expected_dtype",\n [(["1-1", "1-1", np.nan], "category"), (["1-1", "1-2", np.nan], "str")],\n)\ndef test_map_categorical_with_nan_values(data, expected_dtype):\n # GH 20714 bug fixed in: GH 24275\n def func(val):\n return val.split("-")[0]\n\n s = Series(data, dtype="category")\n\n result = s.map(func, na_action="ignore")\n expected = Series(["1", "1", np.nan], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_map_empty_integer_series():\n # GH52384\n s = Series([], dtype=int)\n result = s.map(lambda x: x)\n tm.assert_series_equal(result, s)\n\n\ndef test_map_empty_integer_series_with_datetime_index():\n # GH 21245\n s = Series([], index=date_range(start="2018-01-01", periods=0), dtype=int)\n result = s.map(lambda x: x)\n tm.assert_series_equal(result, s)\n\n\n@pytest.mark.parametrize("func", [str, lambda x: str(x)])\ndef test_map_simple_str_callables_same_as_astype(\n string_series, func, using_infer_string\n):\n # test that we are evaluating row-by-row first\n # before vectorized evaluation\n result = string_series.map(func)\n expected = string_series.astype(str if not using_infer_string else "str")\n tm.assert_series_equal(result, expected)\n\n\ndef test_list_raises(string_series):\n with pytest.raises(TypeError, match="'list' object is not callable"):\n string_series.map([lambda x: x])\n\n\ndef test_map():\n data = {\n "A": [0.0, 1.0, 2.0, 3.0, 4.0],\n "B": [0.0, 1.0, 0.0, 1.0, 0.0],\n "C": ["foo1", "foo2", "foo3", "foo4", 
"foo5"],\n "D": bdate_range("1/1/2009", periods=5),\n }\n\n source = Series(data["B"], index=data["C"])\n target = Series(data["C"][:4], index=data["D"][:4])\n\n merged = target.map(source)\n\n for k, v in merged.items():\n assert v == source[target[k]]\n\n # input could be a dict\n merged = target.map(source.to_dict())\n\n for k, v in merged.items():\n assert v == source[target[k]]\n\n\ndef test_map_datetime(datetime_series):\n # function\n result = datetime_series.map(lambda x: x * 2)\n tm.assert_series_equal(result, datetime_series * 2)\n\n\ndef test_map_category():\n # GH 10324\n a = Series([1, 2, 3, 4])\n b = Series(["even", "odd", "even", "odd"], dtype="category")\n c = Series(["even", "odd", "even", "odd"])\n\n exp = Series(["odd", "even", "odd", np.nan], dtype="category")\n tm.assert_series_equal(a.map(b), exp)\n exp = Series(["odd", "even", "odd", np.nan])\n tm.assert_series_equal(a.map(c), exp)\n\n\ndef test_map_category_numeric():\n a = Series(["a", "b", "c", "d"])\n b = Series([1, 2, 3, 4], index=pd.CategoricalIndex(["b", "c", "d", "e"]))\n c = Series([1, 2, 3, 4], index=Index(["b", "c", "d", "e"]))\n\n exp = Series([np.nan, 1, 2, 3])\n tm.assert_series_equal(a.map(b), exp)\n exp = Series([np.nan, 1, 2, 3])\n tm.assert_series_equal(a.map(c), exp)\n\n\ndef test_map_category_string():\n a = Series(["a", "b", "c", "d"])\n b = Series(\n ["B", "C", "D", "E"],\n dtype="category",\n index=pd.CategoricalIndex(["b", "c", "d", "e"]),\n )\n c = Series(["B", "C", "D", "E"], index=Index(["b", "c", "d", "e"]))\n\n exp = Series(\n pd.Categorical([np.nan, "B", "C", "D"], categories=["B", "C", "D", "E"])\n )\n tm.assert_series_equal(a.map(b), exp)\n exp = Series([np.nan, "B", "C", "D"])\n tm.assert_series_equal(a.map(c), exp)\n\n\n@pytest.mark.filterwarnings(r"ignore:Dtype inference:FutureWarning")\ndef test_map_empty(request, index):\n if isinstance(index, MultiIndex):\n request.applymarker(\n pytest.mark.xfail(\n reason="Initializing a Series from a MultiIndex is not 
supported"\n )\n )\n\n s = Series(index)\n result = s.map({})\n\n expected = Series(np.nan, index=s.index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_map_compat():\n # related GH 8024\n s = Series([True, True, False], index=[1, 2, 3])\n result = s.map({True: "foo", False: "bar"})\n expected = Series(["foo", "foo", "bar"], index=[1, 2, 3])\n tm.assert_series_equal(result, expected)\n\n\ndef test_map_int():\n left = Series({"a": 1.0, "b": 2.0, "c": 3.0, "d": 4})\n right = Series({1: 11, 2: 22, 3: 33})\n\n assert left.dtype == np.float64\n assert issubclass(right.dtype.type, np.integer)\n\n merged = left.map(right)\n assert merged.dtype == np.float64\n assert isna(merged["d"])\n assert not isna(merged["c"])\n\n\ndef test_map_type_inference():\n s = Series(range(3))\n s2 = s.map(lambda x: np.where(x == 0, 0, 1))\n assert issubclass(s2.dtype.type, np.integer)\n\n\ndef test_map_decimal(string_series):\n result = string_series.map(lambda x: Decimal(str(x)))\n assert result.dtype == np.object_\n assert isinstance(result.iloc[0], Decimal)\n\n\ndef test_map_na_exclusion():\n s = Series([1.5, np.nan, 3, np.nan, 5])\n\n result = s.map(lambda x: x * 2, na_action="ignore")\n exp = s * 2\n tm.assert_series_equal(result, exp)\n\n\ndef test_map_dict_with_tuple_keys():\n """\n Due to new MultiIndex-ing behaviour in v0.14.0,\n dicts with tuple keys passed to map were being\n converted to a multi-index, preventing tuple values\n from being mapped properly.\n """\n # GH 18496\n df = DataFrame({"a": [(1,), (2,), (3, 4), (5, 6)]})\n label_mappings = {(1,): "A", (2,): "B", (3, 4): "A", (5, 6): "B"}\n\n df["labels"] = df["a"].map(label_mappings)\n df["expected_labels"] = Series(["A", "B", "A", "B"], index=df.index)\n # All labels should be filled now\n tm.assert_series_equal(df["labels"], df["expected_labels"], check_names=False)\n\n\ndef test_map_counter():\n s = Series(["a", "b", "c"], index=[1, 2, 3])\n counter = Counter()\n counter["b"] = 5\n counter["c"] += 1\n result = 
s.map(counter)\n expected = Series([0, 5, 1], index=[1, 2, 3])\n tm.assert_series_equal(result, expected)\n\n\ndef test_map_defaultdict():\n s = Series([1, 2, 3], index=["a", "b", "c"])\n default_dict = defaultdict(lambda: "blank")\n default_dict[1] = "stuff"\n result = s.map(default_dict)\n expected = Series(["stuff", "blank", "blank"], index=["a", "b", "c"])\n tm.assert_series_equal(result, expected)\n\n\ndef test_map_dict_na_key():\n # https://github.com/pandas-dev/pandas/issues/17648\n # Checks that np.nan key is appropriately mapped\n s = Series([1, 2, np.nan])\n expected = Series(["a", "b", "c"])\n result = s.map({1: "a", 2: "b", np.nan: "c"})\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("na_action", [None, "ignore"])\ndef test_map_defaultdict_na_key(na_action):\n # GH 48813\n s = Series([1, 2, np.nan])\n default_map = defaultdict(lambda: "missing", {1: "a", 2: "b", np.nan: "c"})\n result = s.map(default_map, na_action=na_action)\n expected = Series({0: "a", 1: "b", 2: "c" if na_action is None else np.nan})\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("na_action", [None, "ignore"])\ndef test_map_defaultdict_missing_key(na_action):\n # GH 48813\n s = Series([1, 2, np.nan])\n default_map = defaultdict(lambda: "missing", {1: "a", 2: "b", 3: "c"})\n result = s.map(default_map, na_action=na_action)\n expected = Series({0: "a", 1: "b", 2: "missing" if na_action is None else np.nan})\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("na_action", [None, "ignore"])\ndef test_map_defaultdict_unmutated(na_action):\n # GH 48813\n s = Series([1, 2, np.nan])\n default_map = defaultdict(lambda: "missing", {1: "a", 2: "b", np.nan: "c"})\n expected_default_map = default_map.copy()\n s.map(default_map, na_action=na_action)\n assert default_map == expected_default_map\n\n\n@pytest.mark.parametrize("arg_func", [dict, Series])\ndef test_map_dict_ignore_na(arg_func):\n # GH#47527\n mapping = 
arg_func({1: 10, np.nan: 42})\n ser = Series([1, np.nan, 2])\n result = ser.map(mapping, na_action="ignore")\n expected = Series([10, np.nan, np.nan])\n tm.assert_series_equal(result, expected)\n\n\ndef test_map_defaultdict_ignore_na():\n # GH#47527\n mapping = defaultdict(int, {1: 10, np.nan: 42})\n ser = Series([1, np.nan, 2])\n result = ser.map(mapping)\n expected = Series([10, 42, 0])\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "na_action, expected",\n [(None, Series([10.0, 42.0, np.nan])), ("ignore", Series([10, np.nan, np.nan]))],\n)\ndef test_map_categorical_na_ignore(na_action, expected):\n # GH#47527\n values = pd.Categorical([1, np.nan, 2], categories=[10, 1, 2])\n ser = Series(values)\n result = ser.map({1: 10, np.nan: 42}, na_action=na_action)\n tm.assert_series_equal(result, expected)\n\n\ndef test_map_dict_subclass_with_missing():\n """\n Test Series.map with a dictionary subclass that defines __missing__,\n i.e. sets a default value (GH #15999).\n """\n\n class DictWithMissing(dict):\n def __missing__(self, key):\n return "missing"\n\n s = Series([1, 2, 3])\n dictionary = DictWithMissing({3: "three"})\n result = s.map(dictionary)\n expected = Series(["missing", "missing", "three"])\n tm.assert_series_equal(result, expected)\n\n\ndef test_map_dict_subclass_without_missing():\n class DictWithoutMissing(dict):\n pass\n\n s = Series([1, 2, 3])\n dictionary = DictWithoutMissing({3: "three"})\n result = s.map(dictionary)\n expected = Series([np.nan, np.nan, "three"])\n tm.assert_series_equal(result, expected)\n\n\ndef test_map_abc_mapping(non_dict_mapping_subclass):\n # https://github.com/pandas-dev/pandas/issues/29733\n # Check collections.abc.Mapping support as mapper for Series.map\n s = Series([1, 2, 3])\n not_a_dictionary = non_dict_mapping_subclass({3: "three"})\n result = s.map(not_a_dictionary)\n expected = Series([np.nan, np.nan, "three"])\n tm.assert_series_equal(result, expected)\n\n\ndef 
test_map_abc_mapping_with_missing(non_dict_mapping_subclass):\n # https://github.com/pandas-dev/pandas/issues/29733\n # Check collections.abc.Mapping support as mapper for Series.map\n class NonDictMappingWithMissing(non_dict_mapping_subclass):\n def __missing__(self, key):\n return "missing"\n\n s = Series([1, 2, 3])\n not_a_dictionary = NonDictMappingWithMissing({3: "three"})\n result = s.map(not_a_dictionary)\n # __missing__ is a dict concept, not a Mapping concept,\n # so it should not change the result!\n expected = Series([np.nan, np.nan, "three"])\n tm.assert_series_equal(result, expected)\n\n\ndef test_map_box_dt64(unit):\n vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]\n ser = Series(vals).dt.as_unit(unit)\n assert ser.dtype == f"datetime64[{unit}]"\n # boxed value must be Timestamp instance\n res = ser.map(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")\n exp = Series(["Timestamp_1_None", "Timestamp_2_None"])\n tm.assert_series_equal(res, exp)\n\n\ndef test_map_box_dt64tz(unit):\n vals = [\n pd.Timestamp("2011-01-01", tz="US/Eastern"),\n pd.Timestamp("2011-01-02", tz="US/Eastern"),\n ]\n ser = Series(vals).dt.as_unit(unit)\n assert ser.dtype == f"datetime64[{unit}, US/Eastern]"\n res = ser.map(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")\n exp = Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])\n tm.assert_series_equal(res, exp)\n\n\ndef test_map_box_td64(unit):\n # timedelta\n vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]\n ser = Series(vals).dt.as_unit(unit)\n assert ser.dtype == f"timedelta64[{unit}]"\n res = ser.map(lambda x: f"{type(x).__name__}_{x.days}")\n exp = Series(["Timedelta_1", "Timedelta_2"])\n tm.assert_series_equal(res, exp)\n\n\ndef test_map_box_period():\n # period\n vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]\n ser = Series(vals)\n assert ser.dtype == "Period[M]"\n res = ser.map(lambda x: f"{type(x).__name__}_{x.freqstr}")\n exp = Series(["Period_M", 
"Period_M"])\n tm.assert_series_equal(res, exp)\n\n\n@pytest.mark.parametrize("na_action", [None, "ignore"])\ndef test_map_categorical(na_action, using_infer_string):\n values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)\n s = Series(values, name="XX", index=list("abcdefg"))\n\n result = s.map(lambda x: x.lower(), na_action=na_action)\n exp_values = pd.Categorical(list("abbabcd"), categories=list("dcba"), ordered=True)\n exp = Series(exp_values, name="XX", index=list("abcdefg"))\n tm.assert_series_equal(result, exp)\n tm.assert_categorical_equal(result.values, exp_values)\n\n result = s.map(lambda x: "A", na_action=na_action)\n exp = Series(["A"] * 7, name="XX", index=list("abcdefg"))\n tm.assert_series_equal(result, exp)\n assert result.dtype == object if not using_infer_string else "str"\n\n\n@pytest.mark.parametrize(\n "na_action, expected",\n (\n [None, Series(["A", "B", "nan"], name="XX")],\n [\n "ignore",\n Series(\n ["A", "B", np.nan],\n name="XX",\n dtype=pd.CategoricalDtype(list("DCBA"), True),\n ),\n ],\n ),\n)\ndef test_map_categorical_na_action(na_action, expected):\n dtype = pd.CategoricalDtype(list("DCBA"), ordered=True)\n values = pd.Categorical(list("AB") + [np.nan], dtype=dtype)\n s = Series(values, name="XX")\n result = s.map(str, na_action=na_action)\n tm.assert_series_equal(result, expected)\n\n\ndef test_map_datetimetz():\n values = date_range("2011-01-01", "2011-01-02", freq="h").tz_localize("Asia/Tokyo")\n s = Series(values, name="XX")\n\n # keep tz\n result = s.map(lambda x: x + pd.offsets.Day())\n exp_values = date_range("2011-01-02", "2011-01-03", freq="h").tz_localize(\n "Asia/Tokyo"\n )\n exp = Series(exp_values, name="XX")\n tm.assert_series_equal(result, exp)\n\n result = s.map(lambda x: x.hour)\n exp = Series(list(range(24)) + [0], name="XX", dtype=np.int64)\n tm.assert_series_equal(result, exp)\n\n # not vectorized\n def f(x):\n if not isinstance(x, pd.Timestamp):\n raise ValueError\n return str(x.tz)\n\n 
result = s.map(f)\n exp = Series(["Asia/Tokyo"] * 25, name="XX")\n tm.assert_series_equal(result, exp)\n\n\n@pytest.mark.parametrize(\n "vals,mapping,exp",\n [\n (list("abc"), {np.nan: "not NaN"}, [np.nan] * 3 + ["not NaN"]),\n (list("abc"), {"a": "a letter"}, ["a letter"] + [np.nan] * 3),\n (list(range(3)), {0: 42}, [42] + [np.nan] * 3),\n ],\n)\ndef test_map_missing_mixed(vals, mapping, exp):\n # GH20495\n s = Series(vals + [np.nan])\n result = s.map(mapping)\n exp = Series(exp)\n tm.assert_series_equal(result, exp)\n\n\ndef test_map_scalar_on_date_time_index_aware_series():\n # GH 25959\n # Calling map on a localized time series should not cause an error\n series = Series(\n np.arange(10, dtype=np.float64),\n index=date_range("2020-01-01", periods=10, tz="UTC"),\n name="ts",\n )\n result = Series(series.index).map(lambda x: 1)\n tm.assert_series_equal(result, Series(np.ones(len(series)), dtype="int64"))\n\n\ndef test_map_float_to_string_precision():\n # GH 13228\n ser = Series(1 / 3)\n result = ser.map(lambda val: str(val)).to_dict()\n expected = {0: "0.3333333333333333"}\n assert result == expected\n\n\ndef test_map_to_timedelta():\n list_of_valid_strings = ["00:00:01", "00:00:02"]\n a = pd.to_timedelta(list_of_valid_strings)\n b = Series(list_of_valid_strings).map(pd.to_timedelta)\n tm.assert_series_equal(Series(a), b)\n\n list_of_strings = ["00:00:01", np.nan, pd.NaT, pd.NaT]\n\n a = pd.to_timedelta(list_of_strings)\n ser = Series(list_of_strings)\n b = ser.map(pd.to_timedelta)\n tm.assert_series_equal(Series(a), b)\n\n\ndef test_map_type():\n # GH 46719\n s = Series([3, "string", float], index=["a", "b", "c"])\n result = s.map(type)\n expected = Series([int, str, type], index=["a", "b", "c"])\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_map.py | test_map.py | Python | 18,550 | 0.95 | 0.11755 | 0.090129 | vue-tools | 2 | 2024-08-16T14:46:53.896998 | GPL-3.0 | true | dc3f2a5c8dba32256de942d95a61e187 |
import operator\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n Series,\n)\nimport pandas._testing as tm\n\n\nclass TestMatmul:\n def test_matmul(self):\n # matmul test is for GH#10259\n a = Series(\n np.random.default_rng(2).standard_normal(4), index=["p", "q", "r", "s"]\n )\n b = DataFrame(\n np.random.default_rng(2).standard_normal((3, 4)),\n index=["1", "2", "3"],\n columns=["p", "q", "r", "s"],\n ).T\n\n # Series @ DataFrame -> Series\n result = operator.matmul(a, b)\n expected = Series(np.dot(a.values, b.values), index=["1", "2", "3"])\n tm.assert_series_equal(result, expected)\n\n # DataFrame @ Series -> Series\n result = operator.matmul(b.T, a)\n expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])\n tm.assert_series_equal(result, expected)\n\n # Series @ Series -> scalar\n result = operator.matmul(a, a)\n expected = np.dot(a.values, a.values)\n tm.assert_almost_equal(result, expected)\n\n # GH#21530\n # vector (1D np.array) @ Series (__rmatmul__)\n result = operator.matmul(a.values, a)\n expected = np.dot(a.values, a.values)\n tm.assert_almost_equal(result, expected)\n\n # GH#21530\n # vector (1D list) @ Series (__rmatmul__)\n result = operator.matmul(a.values.tolist(), a)\n expected = np.dot(a.values, a.values)\n tm.assert_almost_equal(result, expected)\n\n # GH#21530\n # matrix (2D np.array) @ Series (__rmatmul__)\n result = operator.matmul(b.T.values, a)\n expected = np.dot(b.T.values, a.values)\n tm.assert_almost_equal(result, expected)\n\n # GH#21530\n # matrix (2D nested lists) @ Series (__rmatmul__)\n result = operator.matmul(b.T.values.tolist(), a)\n expected = np.dot(b.T.values, a.values)\n tm.assert_almost_equal(result, expected)\n\n # mixed dtype DataFrame @ Series\n a["p"] = int(a.p)\n result = operator.matmul(b.T, a)\n expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])\n tm.assert_series_equal(result, expected)\n\n # different dtypes DataFrame @ Series\n a = a.astype(int)\n 
result = operator.matmul(b.T, a)\n expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])\n tm.assert_series_equal(result, expected)\n\n msg = r"Dot product shape mismatch, \(4,\) vs \(3,\)"\n # exception raised is of type Exception\n with pytest.raises(Exception, match=msg):\n a.dot(a.values[:3])\n msg = "matrices are not aligned"\n with pytest.raises(ValueError, match=msg):\n a.dot(b.T)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_matmul.py | test_matmul.py | Python | 2,767 | 0.95 | 0.036585 | 0.220588 | node-utils | 258 | 2023-11-17T11:52:52.061305 | MIT | true | cf01b6554ab9e42301d0df9f7fb288a5 |
"""\nNote: for naming purposes, most tests are title with as e.g. "test_nlargest_foo"\nbut are implicitly also testing nsmallest_foo.\n"""\nfrom itertools import product\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import Series\nimport pandas._testing as tm\n\nmain_dtypes = [\n "datetime",\n "datetimetz",\n "timedelta",\n "int8",\n "int16",\n "int32",\n "int64",\n "float32",\n "float64",\n "uint8",\n "uint16",\n "uint32",\n "uint64",\n]\n\n\n@pytest.fixture\ndef s_main_dtypes():\n """\n A DataFrame with many dtypes\n\n * datetime\n * datetimetz\n * timedelta\n * [u]int{8,16,32,64}\n * float{32,64}\n\n The columns are the name of the dtype.\n """\n df = pd.DataFrame(\n {\n "datetime": pd.to_datetime(["2003", "2002", "2001", "2002", "2005"]),\n "datetimetz": pd.to_datetime(\n ["2003", "2002", "2001", "2002", "2005"]\n ).tz_localize("US/Eastern"),\n "timedelta": pd.to_timedelta(["3d", "2d", "1d", "2d", "5d"]),\n }\n )\n\n for dtype in [\n "int8",\n "int16",\n "int32",\n "int64",\n "float32",\n "float64",\n "uint8",\n "uint16",\n "uint32",\n "uint64",\n ]:\n df[dtype] = Series([3, 2, 1, 2, 5], dtype=dtype)\n\n return df\n\n\n@pytest.fixture(params=main_dtypes)\ndef s_main_dtypes_split(request, s_main_dtypes):\n """Each series in s_main_dtypes."""\n return s_main_dtypes[request.param]\n\n\ndef assert_check_nselect_boundary(vals, dtype, method):\n # helper function for 'test_boundary_{dtype}' tests\n ser = Series(vals, dtype=dtype)\n result = getattr(ser, method)(3)\n expected_idxr = [0, 1, 2] if method == "nsmallest" else [3, 2, 1]\n expected = ser.loc[expected_idxr]\n tm.assert_series_equal(result, expected)\n\n\nclass TestSeriesNLargestNSmallest:\n @pytest.mark.parametrize(\n "r",\n [\n Series([3.0, 2, 1, 2, "5"], dtype="object"),\n Series([3.0, 2, 1, 2, 5], dtype="object"),\n # not supported on some archs\n # Series([3., 2, 1, 2, 5], dtype='complex256'),\n Series([3.0, 2, 1, 2, 5], dtype="complex128"),\n Series(list("abcde")),\n 
Series(list("abcde"), dtype="category"),\n ],\n )\n def test_nlargest_error(self, r):\n dt = r.dtype\n msg = f"Cannot use method 'n(largest|smallest)' with dtype {dt}"\n args = 2, len(r), 0, -1\n methods = r.nlargest, r.nsmallest\n for method, arg in product(methods, args):\n with pytest.raises(TypeError, match=msg):\n method(arg)\n\n def test_nsmallest_nlargest(self, s_main_dtypes_split):\n # float, int, datetime64 (use i8), timedelts64 (same),\n # object that are numbers, object that are strings\n ser = s_main_dtypes_split\n\n tm.assert_series_equal(ser.nsmallest(2), ser.iloc[[2, 1]])\n tm.assert_series_equal(ser.nsmallest(2, keep="last"), ser.iloc[[2, 3]])\n\n empty = ser.iloc[0:0]\n tm.assert_series_equal(ser.nsmallest(0), empty)\n tm.assert_series_equal(ser.nsmallest(-1), empty)\n tm.assert_series_equal(ser.nlargest(0), empty)\n tm.assert_series_equal(ser.nlargest(-1), empty)\n\n tm.assert_series_equal(ser.nsmallest(len(ser)), ser.sort_values())\n tm.assert_series_equal(ser.nsmallest(len(ser) + 1), ser.sort_values())\n tm.assert_series_equal(ser.nlargest(len(ser)), ser.iloc[[4, 0, 1, 3, 2]])\n tm.assert_series_equal(ser.nlargest(len(ser) + 1), ser.iloc[[4, 0, 1, 3, 2]])\n\n def test_nlargest_misc(self):\n ser = Series([3.0, np.nan, 1, 2, 5])\n result = ser.nlargest()\n expected = ser.iloc[[4, 0, 3, 2, 1]]\n tm.assert_series_equal(result, expected)\n result = ser.nsmallest()\n expected = ser.iloc[[2, 3, 0, 4, 1]]\n tm.assert_series_equal(result, expected)\n\n msg = 'keep must be either "first", "last"'\n with pytest.raises(ValueError, match=msg):\n ser.nsmallest(keep="invalid")\n with pytest.raises(ValueError, match=msg):\n ser.nlargest(keep="invalid")\n\n # GH#15297\n ser = Series([1] * 5, index=[1, 2, 3, 4, 5])\n expected_first = Series([1] * 3, index=[1, 2, 3])\n expected_last = Series([1] * 3, index=[5, 4, 3])\n\n result = ser.nsmallest(3)\n tm.assert_series_equal(result, expected_first)\n\n result = ser.nsmallest(3, keep="last")\n 
tm.assert_series_equal(result, expected_last)\n\n result = ser.nlargest(3)\n tm.assert_series_equal(result, expected_first)\n\n result = ser.nlargest(3, keep="last")\n tm.assert_series_equal(result, expected_last)\n\n @pytest.mark.parametrize("n", range(1, 5))\n def test_nlargest_n(self, n):\n # GH 13412\n ser = Series([1, 4, 3, 2], index=[0, 0, 1, 1])\n result = ser.nlargest(n)\n expected = ser.sort_values(ascending=False).head(n)\n tm.assert_series_equal(result, expected)\n\n result = ser.nsmallest(n)\n expected = ser.sort_values().head(n)\n tm.assert_series_equal(result, expected)\n\n def test_nlargest_boundary_integer(self, nselect_method, any_int_numpy_dtype):\n # GH#21426\n dtype_info = np.iinfo(any_int_numpy_dtype)\n min_val, max_val = dtype_info.min, dtype_info.max\n vals = [min_val, min_val + 1, max_val - 1, max_val]\n assert_check_nselect_boundary(vals, any_int_numpy_dtype, nselect_method)\n\n def test_nlargest_boundary_float(self, nselect_method, float_numpy_dtype):\n # GH#21426\n dtype_info = np.finfo(float_numpy_dtype)\n min_val, max_val = dtype_info.min, dtype_info.max\n min_2nd, max_2nd = np.nextafter([min_val, max_val], 0, dtype=float_numpy_dtype)\n vals = [min_val, min_2nd, max_2nd, max_val]\n assert_check_nselect_boundary(vals, float_numpy_dtype, nselect_method)\n\n @pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"])\n def test_nlargest_boundary_datetimelike(self, nselect_method, dtype):\n # GH#21426\n # use int64 bounds and +1 to min_val since true minimum is NaT\n # (include min_val/NaT at end to maintain same expected_idxr)\n dtype_info = np.iinfo("int64")\n min_val, max_val = dtype_info.min, dtype_info.max\n vals = [min_val + 1, min_val + 2, max_val - 1, max_val, min_val]\n assert_check_nselect_boundary(vals, dtype, nselect_method)\n\n def test_nlargest_duplicate_keep_all_ties(self):\n # see GH#16818\n ser = Series([10, 9, 8, 7, 7, 7, 7, 6])\n result = ser.nlargest(4, keep="all")\n expected = Series([10, 9, 8, 7, 7, 7, 
7])\n tm.assert_series_equal(result, expected)\n\n result = ser.nsmallest(2, keep="all")\n expected = Series([6, 7, 7, 7, 7], index=[7, 3, 4, 5, 6])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "data,expected", [([True, False], [True]), ([True, False, True, True], [True])]\n )\n def test_nlargest_boolean(self, data, expected):\n # GH#26154 : ensure True > False\n ser = Series(data)\n result = ser.nlargest(1)\n expected = Series(expected)\n tm.assert_series_equal(result, expected)\n\n def test_nlargest_nullable(self, any_numeric_ea_dtype):\n # GH#42816\n dtype = any_numeric_ea_dtype\n if dtype.startswith("UInt"):\n # Can't cast from negative float to uint on some platforms\n arr = np.random.default_rng(2).integers(1, 10, 10)\n else:\n arr = np.random.default_rng(2).standard_normal(10)\n arr = arr.astype(dtype.lower(), copy=False)\n\n ser = Series(arr.copy(), dtype=dtype)\n ser[1] = pd.NA\n result = ser.nlargest(5)\n\n expected = (\n Series(np.delete(arr, 1), index=ser.index.delete(1))\n .nlargest(5)\n .astype(dtype)\n )\n tm.assert_series_equal(result, expected)\n\n def test_nsmallest_nan_when_keep_is_all(self):\n # GH#46589\n s = Series([1, 2, 3, 3, 3, None])\n result = s.nsmallest(3, keep="all")\n expected = Series([1.0, 2.0, 3.0, 3.0, 3.0])\n tm.assert_series_equal(result, expected)\n\n s = Series([1, 2, None, None, None])\n result = s.nsmallest(3, keep="all")\n expected = Series([1, 2, None, None, None])\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_nlargest.py | test_nlargest.py | Python | 8,442 | 0.95 | 0.08871 | 0.105263 | vue-tools | 331 | 2024-09-16T13:47:25.983392 | GPL-3.0 | true | c50ab4b8b1841380ef91320794dda365 |
import numpy as np\n\nfrom pandas import (\n Categorical,\n Series,\n)\n\n\ndef test_nunique():\n # basics.rst doc example\n series = Series(np.random.default_rng(2).standard_normal(500))\n series[20:500] = np.nan\n series[10:20] = 5000\n result = series.nunique()\n assert result == 11\n\n\ndef test_nunique_categorical():\n # GH#18051\n ser = Series(Categorical([]))\n assert ser.nunique() == 0\n\n ser = Series(Categorical([np.nan]))\n assert ser.nunique() == 0\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_nunique.py | test_nunique.py | Python | 481 | 0.95 | 0.083333 | 0.111111 | awesome-app | 588 | 2024-04-16T02:20:46.592303 | MIT | true | 0c44d95be042df1ec76c614b92ee4bab |
import numpy as np\nimport pytest\n\nfrom pandas import (\n Series,\n date_range,\n)\nimport pandas._testing as tm\n\n\nclass TestSeriesPctChange:\n def test_pct_change(self, datetime_series):\n msg = (\n "The 'fill_method' keyword being not None and the 'limit' keyword in "\n "Series.pct_change are deprecated"\n )\n\n rs = datetime_series.pct_change(fill_method=None)\n tm.assert_series_equal(rs, datetime_series / datetime_series.shift(1) - 1)\n\n rs = datetime_series.pct_change(2)\n filled = datetime_series.ffill()\n tm.assert_series_equal(rs, filled / filled.shift(2) - 1)\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n rs = datetime_series.pct_change(fill_method="bfill", limit=1)\n filled = datetime_series.bfill(limit=1)\n tm.assert_series_equal(rs, filled / filled.shift(1) - 1)\n\n rs = datetime_series.pct_change(freq="5D")\n filled = datetime_series.ffill()\n tm.assert_series_equal(\n rs, (filled / filled.shift(freq="5D") - 1).reindex_like(filled)\n )\n\n def test_pct_change_with_duplicate_axis(self):\n # GH#28664\n common_idx = date_range("2019-11-14", periods=5, freq="D")\n result = Series(range(5), common_idx).pct_change(freq="B")\n\n # the reason that the expected should be like this is documented at PR 28681\n expected = Series([np.nan, np.inf, np.nan, np.nan, 3.0], common_idx)\n\n tm.assert_series_equal(result, expected)\n\n def test_pct_change_shift_over_nas(self):\n s = Series([1.0, 1.5, np.nan, 2.5, 3.0])\n\n msg = "The default fill_method='pad' in Series.pct_change is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n chg = s.pct_change()\n\n expected = Series([np.nan, 0.5, 0.0, 2.5 / 1.5 - 1, 0.2])\n tm.assert_series_equal(chg, expected)\n\n @pytest.mark.parametrize(\n "freq, periods, fill_method, limit",\n [\n ("5B", 5, None, None),\n ("3B", 3, None, None),\n ("3B", 3, "bfill", None),\n ("7B", 7, "pad", 1),\n ("7B", 7, "bfill", 3),\n ("14B", 14, None, None),\n ],\n )\n def test_pct_change_periods_freq(\n 
self, freq, periods, fill_method, limit, datetime_series\n ):\n msg = (\n "The 'fill_method' keyword being not None and the 'limit' keyword in "\n "Series.pct_change are deprecated"\n )\n\n # GH#7292\n with tm.assert_produces_warning(FutureWarning, match=msg):\n rs_freq = datetime_series.pct_change(\n freq=freq, fill_method=fill_method, limit=limit\n )\n with tm.assert_produces_warning(FutureWarning, match=msg):\n rs_periods = datetime_series.pct_change(\n periods, fill_method=fill_method, limit=limit\n )\n tm.assert_series_equal(rs_freq, rs_periods)\n\n empty_ts = Series(index=datetime_series.index, dtype=object)\n with tm.assert_produces_warning(FutureWarning, match=msg):\n rs_freq = empty_ts.pct_change(\n freq=freq, fill_method=fill_method, limit=limit\n )\n with tm.assert_produces_warning(FutureWarning, match=msg):\n rs_periods = empty_ts.pct_change(\n periods, fill_method=fill_method, limit=limit\n )\n tm.assert_series_equal(rs_freq, rs_periods)\n\n\n@pytest.mark.parametrize("fill_method", ["pad", "ffill", None])\ndef test_pct_change_with_duplicated_indices(fill_method):\n # GH30463\n s = Series([np.nan, 1, 2, 3, 9, 18], index=["a", "b"] * 3)\n\n warn = None if fill_method is None else FutureWarning\n msg = (\n "The 'fill_method' keyword being not None and the 'limit' keyword in "\n "Series.pct_change are deprecated"\n )\n with tm.assert_produces_warning(warn, match=msg):\n result = s.pct_change(fill_method=fill_method)\n\n expected = Series([np.nan, np.nan, 1.0, 0.5, 2.0, 1.0], index=["a", "b"] * 3)\n tm.assert_series_equal(result, expected)\n\n\ndef test_pct_change_no_warning_na_beginning():\n # GH#54981\n ser = Series([None, None, 1, 2, 3])\n result = ser.pct_change()\n expected = Series([np.nan, np.nan, np.nan, 1, 0.5])\n tm.assert_series_equal(result, expected)\n\n\ndef test_pct_change_empty():\n # GH 57056\n ser = Series([], dtype="float64")\n expected = ser.copy()\n result = ser.pct_change(periods=0)\n tm.assert_series_equal(expected, result)\n | 
.venv\Lib\site-packages\pandas\tests\series\methods\test_pct_change.py | test_pct_change.py | Python | 4,523 | 0.95 | 0.070313 | 0.057692 | node-utils | 59 | 2025-01-15T07:25:54.939837 | BSD-3-Clause | true | ee0419d166bfab33cbbe5553ae365392 |
from pandas import Series\nimport pandas._testing as tm\n\n\ndef test_pop():\n # GH#6600\n ser = Series([0, 4, 0], index=["A", "B", "C"], name=4)\n\n result = ser.pop("B")\n assert result == 4\n\n expected = Series([0, 0], index=["A", "C"], name=4)\n tm.assert_series_equal(ser, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_pop.py | test_pop.py | Python | 295 | 0.95 | 0.076923 | 0.111111 | awesome-app | 321 | 2025-06-14T16:57:19.368994 | MIT | true | 8d725b2876dbbf53abf08e38c7d34afa |
import numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.common import is_integer\n\nimport pandas as pd\nfrom pandas import (\n Index,\n Series,\n)\nimport pandas._testing as tm\nfrom pandas.core.indexes.datetimes import Timestamp\n\n\nclass TestSeriesQuantile:\n def test_quantile(self, datetime_series):\n q = datetime_series.quantile(0.1)\n assert q == np.percentile(datetime_series.dropna(), 10)\n\n q = datetime_series.quantile(0.9)\n assert q == np.percentile(datetime_series.dropna(), 90)\n\n # object dtype\n q = Series(datetime_series, dtype=object).quantile(0.9)\n assert q == np.percentile(datetime_series.dropna(), 90)\n\n # datetime64[ns] dtype\n dts = datetime_series.index.to_series()\n q = dts.quantile(0.2)\n assert q == Timestamp("2000-01-10 19:12:00")\n\n # timedelta64[ns] dtype\n tds = dts.diff()\n q = tds.quantile(0.25)\n assert q == pd.to_timedelta("24:00:00")\n\n # GH7661\n result = Series([np.timedelta64("NaT")]).sum()\n assert result == pd.Timedelta(0)\n\n msg = "percentiles should all be in the interval \\[0, 1\\]"\n for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:\n with pytest.raises(ValueError, match=msg):\n datetime_series.quantile(invalid)\n\n s = Series(np.random.default_rng(2).standard_normal(100))\n percentile_array = [-0.5, 0.25, 1.5]\n with pytest.raises(ValueError, match=msg):\n s.quantile(percentile_array)\n\n def test_quantile_multi(self, datetime_series, unit):\n datetime_series.index = datetime_series.index.as_unit(unit)\n qs = [0.1, 0.9]\n result = datetime_series.quantile(qs)\n expected = Series(\n [\n np.percentile(datetime_series.dropna(), 10),\n np.percentile(datetime_series.dropna(), 90),\n ],\n index=qs,\n name=datetime_series.name,\n )\n tm.assert_series_equal(result, expected)\n\n dts = datetime_series.index.to_series()\n dts.name = "xxx"\n result = dts.quantile((0.2, 0.2))\n expected = Series(\n [Timestamp("2000-01-10 19:12:00"), Timestamp("2000-01-10 19:12:00")],\n index=[0.2, 0.2],\n name="xxx",\n dtype=f"M8[{unit}]",\n 
)\n tm.assert_series_equal(result, expected)\n\n result = datetime_series.quantile([])\n expected = Series(\n [], name=datetime_series.name, index=Index([], dtype=float), dtype="float64"\n )\n tm.assert_series_equal(result, expected)\n\n def test_quantile_interpolation(self, datetime_series):\n # see gh-10174\n\n # interpolation = linear (default case)\n q = datetime_series.quantile(0.1, interpolation="linear")\n assert q == np.percentile(datetime_series.dropna(), 10)\n q1 = datetime_series.quantile(0.1)\n assert q1 == np.percentile(datetime_series.dropna(), 10)\n\n # test with and without interpolation keyword\n assert q == q1\n\n def test_quantile_interpolation_dtype(self):\n # GH #10174\n\n # interpolation = linear (default case)\n q = Series([1, 3, 4]).quantile(0.5, interpolation="lower")\n assert q == np.percentile(np.array([1, 3, 4]), 50)\n assert is_integer(q)\n\n q = Series([1, 3, 4]).quantile(0.5, interpolation="higher")\n assert q == np.percentile(np.array([1, 3, 4]), 50)\n assert is_integer(q)\n\n def test_quantile_nan(self):\n # GH 13098\n ser = Series([1, 2, 3, 4, np.nan])\n result = ser.quantile(0.5)\n expected = 2.5\n assert result == expected\n\n # all nan/empty\n s1 = Series([], dtype=object)\n cases = [s1, Series([np.nan, np.nan])]\n\n for ser in cases:\n res = ser.quantile(0.5)\n assert np.isnan(res)\n\n res = ser.quantile([0.5])\n tm.assert_series_equal(res, Series([np.nan], index=[0.5]))\n\n res = ser.quantile([0.2, 0.3])\n tm.assert_series_equal(res, Series([np.nan, np.nan], index=[0.2, 0.3]))\n\n @pytest.mark.parametrize(\n "case",\n [\n [\n Timestamp("2011-01-01"),\n Timestamp("2011-01-02"),\n Timestamp("2011-01-03"),\n ],\n [\n Timestamp("2011-01-01", tz="US/Eastern"),\n Timestamp("2011-01-02", tz="US/Eastern"),\n Timestamp("2011-01-03", tz="US/Eastern"),\n ],\n [pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.Timedelta("3 days")],\n # NaT\n [\n Timestamp("2011-01-01"),\n Timestamp("2011-01-02"),\n Timestamp("2011-01-03"),\n pd.NaT,\n 
],\n [\n Timestamp("2011-01-01", tz="US/Eastern"),\n Timestamp("2011-01-02", tz="US/Eastern"),\n Timestamp("2011-01-03", tz="US/Eastern"),\n pd.NaT,\n ],\n [\n pd.Timedelta("1 days"),\n pd.Timedelta("2 days"),\n pd.Timedelta("3 days"),\n pd.NaT,\n ],\n ],\n )\n def test_quantile_box(self, case):\n ser = Series(case, name="XXX")\n res = ser.quantile(0.5)\n assert res == case[1]\n\n res = ser.quantile([0.5])\n exp = Series([case[1]], index=[0.5], name="XXX")\n tm.assert_series_equal(res, exp)\n\n def test_datetime_timedelta_quantiles(self):\n # covers #9694\n assert pd.isna(Series([], dtype="M8[ns]").quantile(0.5))\n assert pd.isna(Series([], dtype="m8[ns]").quantile(0.5))\n\n def test_quantile_nat(self):\n res = Series([pd.NaT, pd.NaT]).quantile(0.5)\n assert res is pd.NaT\n\n res = Series([pd.NaT, pd.NaT]).quantile([0.5])\n tm.assert_series_equal(res, Series([pd.NaT], index=[0.5]))\n\n @pytest.mark.parametrize(\n "values, dtype",\n [([0, 0, 0, 1, 2, 3], "Sparse[int]"), ([0.0, None, 1.0, 2.0], "Sparse[float]")],\n )\n def test_quantile_sparse(self, values, dtype):\n ser = Series(values, dtype=dtype)\n result = ser.quantile([0.5])\n expected = Series(np.asarray(ser)).quantile([0.5]).astype("Sparse[float]")\n tm.assert_series_equal(result, expected)\n\n def test_quantile_empty_float64(self):\n # floats\n ser = Series([], dtype="float64")\n\n res = ser.quantile(0.5)\n assert np.isnan(res)\n\n res = ser.quantile([0.5])\n exp = Series([np.nan], index=[0.5])\n tm.assert_series_equal(res, exp)\n\n def test_quantile_empty_int64(self):\n # int\n ser = Series([], dtype="int64")\n\n res = ser.quantile(0.5)\n assert np.isnan(res)\n\n res = ser.quantile([0.5])\n exp = Series([np.nan], index=[0.5])\n tm.assert_series_equal(res, exp)\n\n def test_quantile_empty_dt64(self):\n # datetime\n ser = Series([], dtype="datetime64[ns]")\n\n res = ser.quantile(0.5)\n assert res is pd.NaT\n\n res = ser.quantile([0.5])\n exp = Series([pd.NaT], index=[0.5], dtype=ser.dtype)\n 
tm.assert_series_equal(res, exp)\n\n @pytest.mark.parametrize("dtype", [int, float, "Int64"])\n def test_quantile_dtypes(self, dtype):\n result = Series([1, 2, 3], dtype=dtype).quantile(np.arange(0, 1, 0.25))\n expected = Series(np.arange(1, 3, 0.5), index=np.arange(0, 1, 0.25))\n if dtype == "Int64":\n expected = expected.astype("Float64")\n tm.assert_series_equal(result, expected)\n\n def test_quantile_all_na(self, any_int_ea_dtype):\n # GH#50681\n ser = Series([pd.NA, pd.NA], dtype=any_int_ea_dtype)\n with tm.assert_produces_warning(None):\n result = ser.quantile([0.1, 0.5])\n expected = Series([pd.NA, pd.NA], dtype=any_int_ea_dtype, index=[0.1, 0.5])\n tm.assert_series_equal(result, expected)\n\n def test_quantile_dtype_size(self, any_int_ea_dtype):\n # GH#50681\n ser = Series([pd.NA, pd.NA, 1], dtype=any_int_ea_dtype)\n result = ser.quantile([0.1, 0.5])\n expected = Series([1, 1], dtype=any_int_ea_dtype, index=[0.1, 0.5])\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_quantile.py | test_quantile.py | Python | 8,284 | 0.95 | 0.076923 | 0.088235 | vue-tools | 367 | 2023-12-20T04:00:03.452976 | Apache-2.0 | true | 083138abfe4e7b7a7466c466c49cf718 |
from itertools import chain\nimport operator\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.algos import (\n Infinity,\n NegInfinity,\n)\nimport pandas.util._test_decorators as td\n\nfrom pandas import (\n NA,\n NaT,\n Series,\n Timestamp,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.api.types import CategoricalDtype\n\n\n@pytest.fixture\ndef ser():\n return Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3])\n\n\n@pytest.fixture(\n params=[\n ["average", np.array([1.5, 5.5, 7.0, 3.5, np.nan, 3.5, 1.5, 8.0, np.nan, 5.5])],\n ["min", np.array([1, 5, 7, 3, np.nan, 3, 1, 8, np.nan, 5])],\n ["max", np.array([2, 6, 7, 4, np.nan, 4, 2, 8, np.nan, 6])],\n ["first", np.array([1, 5, 7, 3, np.nan, 4, 2, 8, np.nan, 6])],\n ["dense", np.array([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3])],\n ],\n ids=lambda x: x[0],\n)\ndef results(request):\n return request.param\n\n\n@pytest.fixture(\n params=[\n "object",\n "float64",\n "int64",\n "Float64",\n "Int64",\n pytest.param("float64[pyarrow]", marks=td.skip_if_no("pyarrow")),\n pytest.param("int64[pyarrow]", marks=td.skip_if_no("pyarrow")),\n pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")),\n "string[python]",\n "str",\n ]\n)\ndef dtype(request):\n return request.param\n\n\ndef expected_dtype(dtype, method, pct=False):\n exp_dtype = "float64"\n # elif dtype in ["Int64", "Float64", "string[pyarrow]", "string[python]"]:\n if dtype in ["string[pyarrow]"]:\n exp_dtype = "Float64"\n elif dtype in ["float64[pyarrow]", "int64[pyarrow]"]:\n if method == "average" or pct:\n exp_dtype = "double[pyarrow]"\n else:\n exp_dtype = "uint64[pyarrow]"\n\n return exp_dtype\n\n\nclass TestSeriesRank:\n def test_rank(self, datetime_series):\n sp_stats = pytest.importorskip("scipy.stats")\n\n datetime_series[::2] = np.nan\n datetime_series[:10:3] = 4.0\n\n ranks = datetime_series.rank()\n oranks = datetime_series.astype("O").rank()\n\n tm.assert_series_equal(ranks, oranks)\n\n mask = np.isnan(datetime_series)\n filled 
= datetime_series.fillna(np.inf)\n\n # rankdata returns a ndarray\n exp = Series(sp_stats.rankdata(filled), index=filled.index, name="ts")\n exp[mask] = np.nan\n\n tm.assert_series_equal(ranks, exp)\n\n iseries = Series(np.arange(5).repeat(2))\n\n iranks = iseries.rank()\n exp = iseries.astype(float).rank()\n tm.assert_series_equal(iranks, exp)\n iseries = Series(np.arange(5)) + 1.0\n exp = iseries / 5.0\n iranks = iseries.rank(pct=True)\n\n tm.assert_series_equal(iranks, exp)\n\n iseries = Series(np.repeat(1, 100))\n exp = Series(np.repeat(0.505, 100))\n iranks = iseries.rank(pct=True)\n tm.assert_series_equal(iranks, exp)\n\n # Explicit cast to float to avoid implicit cast when setting nan\n iseries = iseries.astype("float")\n iseries[1] = np.nan\n exp = Series(np.repeat(50.0 / 99.0, 100))\n exp[1] = np.nan\n iranks = iseries.rank(pct=True)\n tm.assert_series_equal(iranks, exp)\n\n iseries = Series(np.arange(5)) + 1.0\n iseries[4] = np.nan\n exp = iseries / 4.0\n iranks = iseries.rank(pct=True)\n tm.assert_series_equal(iranks, exp)\n\n iseries = Series(np.repeat(np.nan, 100))\n exp = iseries.copy()\n iranks = iseries.rank(pct=True)\n tm.assert_series_equal(iranks, exp)\n\n # Explicit cast to float to avoid implicit cast when setting nan\n iseries = Series(np.arange(5), dtype="float") + 1\n iseries[4] = np.nan\n exp = iseries / 4.0\n iranks = iseries.rank(pct=True)\n tm.assert_series_equal(iranks, exp)\n\n rng = date_range("1/1/1990", periods=5)\n # Explicit cast to float to avoid implicit cast when setting nan\n iseries = Series(np.arange(5), rng, dtype="float") + 1\n iseries.iloc[4] = np.nan\n exp = iseries / 4.0\n iranks = iseries.rank(pct=True)\n tm.assert_series_equal(iranks, exp)\n\n iseries = Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20 + 1e-30, 1e-1])\n exp = Series([2, 1, 3, 5, 4, 6.0])\n iranks = iseries.rank()\n tm.assert_series_equal(iranks, exp)\n\n # GH 5968\n iseries = Series(["3 day", "1 day 10m", "-2 day", NaT], dtype="m8[ns]")\n exp = Series([3, 2, 
1, np.nan])\n iranks = iseries.rank()\n tm.assert_series_equal(iranks, exp)\n\n values = np.array(\n [-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40],\n dtype="float64",\n )\n random_order = np.random.default_rng(2).permutation(len(values))\n iseries = Series(values[random_order])\n exp = Series(random_order + 1.0, dtype="float64")\n iranks = iseries.rank()\n tm.assert_series_equal(iranks, exp)\n\n def test_rank_categorical(self):\n # GH issue #15420 rank incorrectly orders ordered categories\n\n # Test ascending/descending ranking for ordered categoricals\n exp = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])\n exp_desc = Series([6.0, 5.0, 4.0, 3.0, 2.0, 1.0])\n ordered = Series(\n ["first", "second", "third", "fourth", "fifth", "sixth"]\n ).astype(\n CategoricalDtype(\n categories=["first", "second", "third", "fourth", "fifth", "sixth"],\n ordered=True,\n )\n )\n tm.assert_series_equal(ordered.rank(), exp)\n tm.assert_series_equal(ordered.rank(ascending=False), exp_desc)\n\n # Unordered categoricals should be ranked as objects\n unordered = Series(\n ["first", "second", "third", "fourth", "fifth", "sixth"]\n ).astype(\n CategoricalDtype(\n categories=["first", "second", "third", "fourth", "fifth", "sixth"],\n ordered=False,\n )\n )\n exp_unordered = Series([2.0, 4.0, 6.0, 3.0, 1.0, 5.0])\n res = unordered.rank()\n tm.assert_series_equal(res, exp_unordered)\n\n unordered1 = Series([1, 2, 3, 4, 5, 6]).astype(\n CategoricalDtype([1, 2, 3, 4, 5, 6], False)\n )\n exp_unordered1 = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])\n res1 = unordered1.rank()\n tm.assert_series_equal(res1, exp_unordered1)\n\n # Test na_option for rank data\n na_ser = Series(\n ["first", "second", "third", "fourth", "fifth", "sixth", np.nan]\n ).astype(\n CategoricalDtype(\n ["first", "second", "third", "fourth", "fifth", "sixth", "seventh"],\n True,\n )\n )\n\n exp_top = Series([2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.0])\n exp_bot = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])\n exp_keep = 
Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, np.nan])\n\n tm.assert_series_equal(na_ser.rank(na_option="top"), exp_top)\n tm.assert_series_equal(na_ser.rank(na_option="bottom"), exp_bot)\n tm.assert_series_equal(na_ser.rank(na_option="keep"), exp_keep)\n\n # Test na_option for rank data with ascending False\n exp_top = Series([7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0])\n exp_bot = Series([6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 7.0])\n exp_keep = Series([6.0, 5.0, 4.0, 3.0, 2.0, 1.0, np.nan])\n\n tm.assert_series_equal(na_ser.rank(na_option="top", ascending=False), exp_top)\n tm.assert_series_equal(\n na_ser.rank(na_option="bottom", ascending=False), exp_bot\n )\n tm.assert_series_equal(na_ser.rank(na_option="keep", ascending=False), exp_keep)\n\n # Test invalid values for na_option\n msg = "na_option must be one of 'keep', 'top', or 'bottom'"\n\n with pytest.raises(ValueError, match=msg):\n na_ser.rank(na_option="bad", ascending=False)\n\n # invalid type\n with pytest.raises(ValueError, match=msg):\n na_ser.rank(na_option=True, ascending=False)\n\n # Test with pct=True\n na_ser = Series(["first", "second", "third", "fourth", np.nan]).astype(\n CategoricalDtype(["first", "second", "third", "fourth"], True)\n )\n exp_top = Series([0.4, 0.6, 0.8, 1.0, 0.2])\n exp_bot = Series([0.2, 0.4, 0.6, 0.8, 1.0])\n exp_keep = Series([0.25, 0.5, 0.75, 1.0, np.nan])\n\n tm.assert_series_equal(na_ser.rank(na_option="top", pct=True), exp_top)\n tm.assert_series_equal(na_ser.rank(na_option="bottom", pct=True), exp_bot)\n tm.assert_series_equal(na_ser.rank(na_option="keep", pct=True), exp_keep)\n\n def test_rank_signature(self):\n s = Series([0, 1])\n s.rank(method="average")\n msg = "No axis named average for object type Series"\n with pytest.raises(ValueError, match=msg):\n s.rank("average")\n\n def test_rank_tie_methods(self, ser, results, dtype, using_infer_string):\n method, exp = results\n if (\n dtype == "int64"\n or dtype == "Int64"\n or (not using_infer_string and dtype == "str")\n ):\n 
pytest.skip("int64/str does not support NaN")\n\n ser = ser if dtype is None else ser.astype(dtype)\n result = ser.rank(method=method)\n tm.assert_series_equal(result, Series(exp, dtype=expected_dtype(dtype, method)))\n\n @pytest.mark.parametrize("ascending", [True, False])\n @pytest.mark.parametrize("method", ["average", "min", "max", "first", "dense"])\n @pytest.mark.parametrize("na_option", ["top", "bottom", "keep"])\n @pytest.mark.parametrize(\n "dtype, na_value, pos_inf, neg_inf",\n [\n ("object", None, Infinity(), NegInfinity()),\n ("float64", np.nan, np.inf, -np.inf),\n ("Float64", NA, np.inf, -np.inf),\n pytest.param(\n "float64[pyarrow]",\n NA,\n np.inf,\n -np.inf,\n marks=td.skip_if_no("pyarrow"),\n ),\n ],\n )\n def test_rank_tie_methods_on_infs_nans(\n self, method, na_option, ascending, dtype, na_value, pos_inf, neg_inf\n ):\n pytest.importorskip("scipy")\n if dtype == "float64[pyarrow]":\n if method == "average":\n exp_dtype = "float64[pyarrow]"\n else:\n exp_dtype = "uint64[pyarrow]"\n else:\n exp_dtype = "float64"\n\n chunk = 3\n in_arr = [neg_inf] * chunk + [na_value] * chunk + [pos_inf] * chunk\n iseries = Series(in_arr, dtype=dtype)\n exp_ranks = {\n "average": ([2, 2, 2], [5, 5, 5], [8, 8, 8]),\n "min": ([1, 1, 1], [4, 4, 4], [7, 7, 7]),\n "max": ([3, 3, 3], [6, 6, 6], [9, 9, 9]),\n "first": ([1, 2, 3], [4, 5, 6], [7, 8, 9]),\n "dense": ([1, 1, 1], [2, 2, 2], [3, 3, 3]),\n }\n ranks = exp_ranks[method]\n if na_option == "top":\n order = [ranks[1], ranks[0], ranks[2]]\n elif na_option == "bottom":\n order = [ranks[0], ranks[2], ranks[1]]\n else:\n order = [ranks[0], [np.nan] * chunk, ranks[1]]\n expected = order if ascending else order[::-1]\n expected = list(chain.from_iterable(expected))\n result = iseries.rank(method=method, na_option=na_option, ascending=ascending)\n tm.assert_series_equal(result, Series(expected, dtype=exp_dtype))\n\n def test_rank_desc_mix_nans_infs(self):\n # GH 19538\n # check descending ranking when mix nans and infs\n 
iseries = Series([1, np.nan, np.inf, -np.inf, 25])\n result = iseries.rank(ascending=False)\n exp = Series([3, np.nan, 1, 4, 2], dtype="float64")\n tm.assert_series_equal(result, exp)\n\n @pytest.mark.parametrize("method", ["average", "min", "max", "first", "dense"])\n @pytest.mark.parametrize(\n "op, value",\n [\n [operator.add, 0],\n [operator.add, 1e6],\n [operator.mul, 1e-6],\n ],\n )\n def test_rank_methods_series(self, method, op, value):\n sp_stats = pytest.importorskip("scipy.stats")\n\n xs = np.random.default_rng(2).standard_normal(9)\n xs = np.concatenate([xs[i:] for i in range(0, 9, 2)]) # add duplicates\n np.random.default_rng(2).shuffle(xs)\n\n index = [chr(ord("a") + i) for i in range(len(xs))]\n vals = op(xs, value)\n ts = Series(vals, index=index)\n result = ts.rank(method=method)\n sprank = sp_stats.rankdata(vals, method if method != "first" else "ordinal")\n expected = Series(sprank, index=index).astype("float64")\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "ser, exp",\n [\n ([1], [1]),\n ([2], [1]),\n ([0], [1]),\n ([2, 2], [1, 1]),\n ([1, 2, 3], [1, 2, 3]),\n ([4, 2, 1], [3, 2, 1]),\n ([1, 1, 5, 5, 3], [1, 1, 3, 3, 2]),\n ([-5, -4, -3, -2, -1], [1, 2, 3, 4, 5]),\n ],\n )\n def test_rank_dense_method(self, dtype, ser, exp):\n if ser[0] < 0 and dtype.startswith("str"):\n exp = exp[::-1]\n s = Series(ser).astype(dtype)\n result = s.rank(method="dense")\n expected = Series(exp).astype(expected_dtype(dtype, "dense"))\n tm.assert_series_equal(result, expected)\n\n def test_rank_descending(self, ser, results, dtype, using_infer_string):\n method, _ = results\n if dtype == "int64" or (not using_infer_string and dtype == "str"):\n s = ser.dropna()\n else:\n s = ser.astype(dtype)\n\n res = s.rank(ascending=False)\n if dtype.startswith("str"):\n expected = (s.astype("float64").max() - s.astype("float64")).rank()\n else:\n expected = (s.max() - s).rank()\n tm.assert_series_equal(res, expected.astype(expected_dtype(dtype, 
"average")))\n\n if dtype.startswith("str"):\n expected = (s.astype("float64").max() - s.astype("float64")).rank(\n method=method\n )\n else:\n expected = (s.max() - s).rank(method=method)\n res2 = s.rank(method=method, ascending=False)\n tm.assert_series_equal(res2, expected.astype(expected_dtype(dtype, method)))\n\n def test_rank_int(self, ser, results):\n method, exp = results\n s = ser.dropna().astype("i8")\n\n result = s.rank(method=method)\n expected = Series(exp).dropna()\n expected.index = result.index\n tm.assert_series_equal(result, expected)\n\n def test_rank_object_bug(self):\n # GH 13445\n\n # smoke tests\n Series([np.nan] * 32).astype(object).rank(ascending=True)\n Series([np.nan] * 32).astype(object).rank(ascending=False)\n\n def test_rank_modify_inplace(self):\n # GH 18521\n # Check rank does not mutate series\n s = Series([Timestamp("2017-01-05 10:20:27.569000"), NaT])\n expected = s.copy()\n\n s.rank()\n result = s\n tm.assert_series_equal(result, expected)\n\n def test_rank_ea_small_values(self):\n # GH#52471\n ser = Series(\n [5.4954145e29, -9.791984e-21, 9.3715776e-26, NA, 1.8790257e-28],\n dtype="Float64",\n )\n result = ser.rank(method="min")\n expected = Series([4, 1, 3, np.nan, 2])\n tm.assert_series_equal(result, expected)\n\n\n# GH15630, pct should be on 100% basis when method='dense'\n\n\n@pytest.mark.parametrize(\n "ser, exp",\n [\n ([1], [1.0]),\n ([1, 2], [1.0 / 2, 2.0 / 2]),\n ([2, 2], [1.0, 1.0]),\n ([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),\n ([1, 2, 2], [1.0 / 2, 2.0 / 2, 2.0 / 2]),\n ([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),\n ([1, 1, 5, 5, 3], [1.0 / 3, 1.0 / 3, 3.0 / 3, 3.0 / 3, 2.0 / 3]),\n ([1, 1, 3, 3, 5, 5], [1.0 / 3, 1.0 / 3, 2.0 / 3, 2.0 / 3, 3.0 / 3, 3.0 / 3]),\n ([-5, -4, -3, -2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),\n ],\n)\ndef test_rank_dense_pct(dtype, ser, exp):\n if ser[0] < 0 and dtype.startswith("str"):\n exp = exp[::-1]\n s = Series(ser).astype(dtype)\n result = s.rank(method="dense", pct=True)\n 
expected = Series(exp).astype(expected_dtype(dtype, "dense", pct=True))\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "ser, exp",\n [\n ([1], [1.0]),\n ([1, 2], [1.0 / 2, 2.0 / 2]),\n ([2, 2], [1.0 / 2, 1.0 / 2]),\n ([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),\n ([1, 2, 2], [1.0 / 3, 2.0 / 3, 2.0 / 3]),\n ([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),\n ([1, 1, 5, 5, 3], [1.0 / 5, 1.0 / 5, 4.0 / 5, 4.0 / 5, 3.0 / 5]),\n ([1, 1, 3, 3, 5, 5], [1.0 / 6, 1.0 / 6, 3.0 / 6, 3.0 / 6, 5.0 / 6, 5.0 / 6]),\n ([-5, -4, -3, -2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),\n ],\n)\ndef test_rank_min_pct(dtype, ser, exp):\n if ser[0] < 0 and dtype.startswith("str"):\n exp = exp[::-1]\n s = Series(ser).astype(dtype)\n result = s.rank(method="min", pct=True)\n expected = Series(exp).astype(expected_dtype(dtype, "min", pct=True))\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "ser, exp",\n [\n ([1], [1.0]),\n ([1, 2], [1.0 / 2, 2.0 / 2]),\n ([2, 2], [1.0, 1.0]),\n ([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),\n ([1, 2, 2], [1.0 / 3, 3.0 / 3, 3.0 / 3]),\n ([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),\n ([1, 1, 5, 5, 3], [2.0 / 5, 2.0 / 5, 5.0 / 5, 5.0 / 5, 3.0 / 5]),\n ([1, 1, 3, 3, 5, 5], [2.0 / 6, 2.0 / 6, 4.0 / 6, 4.0 / 6, 6.0 / 6, 6.0 / 6]),\n ([-5, -4, -3, -2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),\n ],\n)\ndef test_rank_max_pct(dtype, ser, exp):\n if ser[0] < 0 and dtype.startswith("str"):\n exp = exp[::-1]\n s = Series(ser).astype(dtype)\n result = s.rank(method="max", pct=True)\n expected = Series(exp).astype(expected_dtype(dtype, "max", pct=True))\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "ser, exp",\n [\n ([1], [1.0]),\n ([1, 2], [1.0 / 2, 2.0 / 2]),\n ([2, 2], [1.5 / 2, 1.5 / 2]),\n ([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),\n ([1, 2, 2], [1.0 / 3, 2.5 / 3, 2.5 / 3]),\n ([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),\n ([1, 1, 5, 5, 3], [1.5 / 5, 1.5 / 5, 4.5 / 5, 4.5 / 5, 3.0 / 
5]),\n ([1, 1, 3, 3, 5, 5], [1.5 / 6, 1.5 / 6, 3.5 / 6, 3.5 / 6, 5.5 / 6, 5.5 / 6]),\n ([-5, -4, -3, -2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),\n ],\n)\ndef test_rank_average_pct(dtype, ser, exp):\n if ser[0] < 0 and dtype.startswith("str"):\n exp = exp[::-1]\n s = Series(ser).astype(dtype)\n result = s.rank(method="average", pct=True)\n expected = Series(exp).astype(expected_dtype(dtype, "average", pct=True))\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "ser, exp",\n [\n ([1], [1.0]),\n ([1, 2], [1.0 / 2, 2.0 / 2]),\n ([2, 2], [1.0 / 2, 2.0 / 2.0]),\n ([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),\n ([1, 2, 2], [1.0 / 3, 2.0 / 3, 3.0 / 3]),\n ([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),\n ([1, 1, 5, 5, 3], [1.0 / 5, 2.0 / 5, 4.0 / 5, 5.0 / 5, 3.0 / 5]),\n ([1, 1, 3, 3, 5, 5], [1.0 / 6, 2.0 / 6, 3.0 / 6, 4.0 / 6, 5.0 / 6, 6.0 / 6]),\n ([-5, -4, -3, -2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),\n ],\n)\ndef test_rank_first_pct(dtype, ser, exp):\n if ser[0] < 0 and dtype.startswith("str"):\n exp = exp[::-1]\n s = Series(ser).astype(dtype)\n result = s.rank(method="first", pct=True)\n expected = Series(exp).astype(expected_dtype(dtype, "first", pct=True))\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.single_cpu\ndef test_pct_max_many_rows():\n # GH 18271\n s = Series(np.arange(2**24 + 1))\n result = s.rank(pct=True).max()\n assert result == 1\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_rank.py | test_rank.py | Python | 19,937 | 0.95 | 0.087034 | 0.047619 | node-utils | 508 | 2024-10-05T09:16:07.546154 | MIT | true | f456b6c4e37c2dbce5cade55de6ddf2e |
import numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nfrom pandas import (\n NA,\n Categorical,\n Float64Dtype,\n Index,\n MultiIndex,\n NaT,\n Period,\n PeriodIndex,\n RangeIndex,\n Series,\n Timedelta,\n Timestamp,\n date_range,\n isna,\n)\nimport pandas._testing as tm\n\n\ndef test_reindex(datetime_series, string_series):\n identity = string_series.reindex(string_series.index)\n\n assert tm.shares_memory(string_series.index, identity.index)\n\n assert identity.index.is_(string_series.index)\n assert identity.index.identical(string_series.index)\n\n subIndex = string_series.index[10:20]\n subSeries = string_series.reindex(subIndex)\n\n for idx, val in subSeries.items():\n assert val == string_series[idx]\n\n subIndex2 = datetime_series.index[10:20]\n subTS = datetime_series.reindex(subIndex2)\n\n for idx, val in subTS.items():\n assert val == datetime_series[idx]\n stuffSeries = datetime_series.reindex(subIndex)\n\n assert np.isnan(stuffSeries).all()\n\n # This is extremely important for the Cython code to not screw up\n nonContigIndex = datetime_series.index[::2]\n subNonContig = datetime_series.reindex(nonContigIndex)\n for idx, val in subNonContig.items():\n assert val == datetime_series[idx]\n\n # return a copy the same index here\n result = datetime_series.reindex()\n assert result is not datetime_series\n\n\ndef test_reindex_nan():\n ts = Series([2, 3, 5, 7], index=[1, 4, np.nan, 8])\n\n i, j = [np.nan, 1, np.nan, 8, 4, np.nan], [2, 0, 2, 3, 1, 2]\n tm.assert_series_equal(ts.reindex(i), ts.iloc[j])\n\n ts.index = ts.index.astype("object")\n\n # reindex coerces index.dtype to float, loc/iloc doesn't\n tm.assert_series_equal(ts.reindex(i), ts.iloc[j], check_index_type=False)\n\n\ndef test_reindex_series_add_nat():\n rng = date_range("1/1/2000 00:00:00", periods=10, freq="10s")\n series = Series(rng)\n\n result = series.reindex(range(15))\n assert np.issubdtype(result.dtype, np.dtype("M8[ns]"))\n\n mask = result.isna()\n assert 
mask[-5:].all()\n assert not mask[:-5].any()\n\n\ndef test_reindex_with_datetimes():\n rng = date_range("1/1/2000", periods=20)\n ts = Series(np.random.default_rng(2).standard_normal(20), index=rng)\n\n result = ts.reindex(list(ts.index[5:10]))\n expected = ts[5:10]\n expected.index = expected.index._with_freq(None)\n tm.assert_series_equal(result, expected)\n\n result = ts[list(ts.index[5:10])]\n tm.assert_series_equal(result, expected)\n\n\ndef test_reindex_corner(datetime_series):\n # (don't forget to fix this) I think it's fixed\n empty = Series(index=[])\n empty.reindex(datetime_series.index, method="pad") # it works\n\n # corner case: pad empty series\n reindexed = empty.reindex(datetime_series.index, method="pad")\n\n # pass non-Index\n reindexed = datetime_series.reindex(list(datetime_series.index))\n datetime_series.index = datetime_series.index._with_freq(None)\n tm.assert_series_equal(datetime_series, reindexed)\n\n # bad fill method\n ts = datetime_series[::2]\n msg = (\n r"Invalid fill method\. Expecting pad \(ffill\), backfill "\n r"\(bfill\) or nearest\. 
Got foo"\n )\n with pytest.raises(ValueError, match=msg):\n ts.reindex(datetime_series.index, method="foo")\n\n\ndef test_reindex_pad():\n s = Series(np.arange(10), dtype="int64")\n s2 = s[::2]\n\n reindexed = s2.reindex(s.index, method="pad")\n reindexed2 = s2.reindex(s.index, method="ffill")\n tm.assert_series_equal(reindexed, reindexed2)\n\n expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8])\n tm.assert_series_equal(reindexed, expected)\n\n\ndef test_reindex_pad2():\n # GH4604\n s = Series([1, 2, 3, 4, 5], index=["a", "b", "c", "d", "e"])\n new_index = ["a", "g", "c", "f"]\n expected = Series([1, 1, 3, 3], index=new_index)\n\n # this changes dtype because the ffill happens after\n result = s.reindex(new_index).ffill()\n tm.assert_series_equal(result, expected.astype("float64"))\n\n msg = "The 'downcast' keyword in ffill is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = s.reindex(new_index).ffill(downcast="infer")\n tm.assert_series_equal(result, expected)\n\n expected = Series([1, 5, 3, 5], index=new_index)\n result = s.reindex(new_index, method="ffill")\n tm.assert_series_equal(result, expected)\n\n\ndef test_reindex_inference():\n # inference of new dtype\n s = Series([True, False, False, True], index=list("abcd"))\n new_index = "agc"\n msg = "Downcasting object dtype arrays on"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = s.reindex(list(new_index)).ffill()\n expected = Series([True, True, False], index=list(new_index))\n tm.assert_series_equal(result, expected)\n\n\ndef test_reindex_downcasting():\n # GH4618 shifted series downcasting\n s = Series(False, index=range(5))\n msg = "Downcasting object dtype arrays on"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = s.shift(1).bfill()\n expected = Series(False, index=range(5))\n tm.assert_series_equal(result, expected)\n\n\ndef test_reindex_nearest():\n s = Series(np.arange(10, dtype="int64"))\n target = [0.1, 0.9, 1.5, 2.0]\n 
result = s.reindex(target, method="nearest")\n expected = Series(np.around(target).astype("int64"), target)\n tm.assert_series_equal(expected, result)\n\n result = s.reindex(target, method="nearest", tolerance=0.2)\n expected = Series([0, 1, np.nan, 2], target)\n tm.assert_series_equal(expected, result)\n\n result = s.reindex(target, method="nearest", tolerance=[0.3, 0.01, 0.4, 3])\n expected = Series([0, np.nan, np.nan, 2], target)\n tm.assert_series_equal(expected, result)\n\n\ndef test_reindex_int(datetime_series):\n ts = datetime_series[::2]\n int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)\n\n # this should work fine\n reindexed_int = int_ts.reindex(datetime_series.index)\n\n # if NaNs introduced\n assert reindexed_int.dtype == np.float64\n\n # NO NaNs introduced\n reindexed_int = int_ts.reindex(int_ts.index[::2])\n assert reindexed_int.dtype == np.dtype(int)\n\n\ndef test_reindex_bool(datetime_series):\n # A series other than float, int, string, or object\n ts = datetime_series[::2]\n bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)\n\n # this should work fine\n reindexed_bool = bool_ts.reindex(datetime_series.index)\n\n # if NaNs introduced\n assert reindexed_bool.dtype == np.object_\n\n # NO NaNs introduced\n reindexed_bool = bool_ts.reindex(bool_ts.index[::2])\n assert reindexed_bool.dtype == np.bool_\n\n\ndef test_reindex_bool_pad(datetime_series):\n # fail\n ts = datetime_series[5:]\n bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)\n filled_bool = bool_ts.reindex(datetime_series.index, method="pad")\n assert isna(filled_bool[:5]).all()\n\n\ndef test_reindex_categorical():\n index = date_range("20000101", periods=3)\n\n # reindexing to an invalid Categorical\n s = Series(["a", "b", "c"], dtype="category")\n result = s.reindex(index)\n expected = Series(\n Categorical(values=[np.nan, np.nan, np.nan], categories=["a", "b", "c"])\n )\n expected.index = index\n tm.assert_series_equal(result, expected)\n\n # partial 
reindexing\n expected = Series(Categorical(values=["b", "c"], categories=["a", "b", "c"]))\n expected.index = [1, 2]\n result = s.reindex([1, 2])\n tm.assert_series_equal(result, expected)\n\n expected = Series(Categorical(values=["c", np.nan], categories=["a", "b", "c"]))\n expected.index = [2, 3]\n result = s.reindex([2, 3])\n tm.assert_series_equal(result, expected)\n\n\ndef test_reindex_astype_order_consistency():\n # GH#17444\n ser = Series([1, 2, 3], index=[2, 0, 1])\n new_index = [0, 1, 2]\n temp_dtype = "category"\n new_dtype = str\n result = ser.reindex(new_index).astype(temp_dtype).astype(new_dtype)\n expected = ser.astype(temp_dtype).reindex(new_index).astype(new_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_reindex_fill_value():\n # -----------------------------------------------------------\n # floats\n floats = Series([1.0, 2.0, 3.0])\n result = floats.reindex([1, 2, 3])\n expected = Series([2.0, 3.0, np.nan], index=[1, 2, 3])\n tm.assert_series_equal(result, expected)\n\n result = floats.reindex([1, 2, 3], fill_value=0)\n expected = Series([2.0, 3.0, 0], index=[1, 2, 3])\n tm.assert_series_equal(result, expected)\n\n # -----------------------------------------------------------\n # ints\n ints = Series([1, 2, 3])\n\n result = ints.reindex([1, 2, 3])\n expected = Series([2.0, 3.0, np.nan], index=[1, 2, 3])\n tm.assert_series_equal(result, expected)\n\n # don't upcast\n result = ints.reindex([1, 2, 3], fill_value=0)\n expected = Series([2, 3, 0], index=[1, 2, 3])\n assert issubclass(result.dtype.type, np.integer)\n tm.assert_series_equal(result, expected)\n\n # -----------------------------------------------------------\n # objects\n objects = Series([1, 2, 3], dtype=object)\n\n result = objects.reindex([1, 2, 3])\n expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object)\n tm.assert_series_equal(result, expected)\n\n result = objects.reindex([1, 2, 3], fill_value="foo")\n expected = Series([2, 3, "foo"], index=[1, 2, 3], 
dtype=object)\n tm.assert_series_equal(result, expected)\n\n # ------------------------------------------------------------\n # bools\n bools = Series([True, False, True])\n\n result = bools.reindex([1, 2, 3])\n expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object)\n tm.assert_series_equal(result, expected)\n\n result = bools.reindex([1, 2, 3], fill_value=False)\n expected = Series([False, True, False], index=[1, 2, 3])\n tm.assert_series_equal(result, expected)\n\n\n@td.skip_array_manager_not_yet_implemented\n@pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"])\n@pytest.mark.parametrize("fill_value", ["string", 0, Timedelta(0)])\ndef test_reindex_fill_value_datetimelike_upcast(dtype, fill_value, using_array_manager):\n # https://github.com/pandas-dev/pandas/issues/42921\n if dtype == "timedelta64[ns]" and fill_value == Timedelta(0):\n # use the scalar that is not compatible with the dtype for this test\n fill_value = Timestamp(0)\n\n ser = Series([NaT], dtype=dtype)\n\n result = ser.reindex([0, 1], fill_value=fill_value)\n expected = Series([NaT, fill_value], index=[0, 1], dtype=object)\n tm.assert_series_equal(result, expected)\n\n\ndef test_reindex_datetimeindexes_tz_naive_and_aware():\n # GH 8306\n idx = date_range("20131101", tz="America/Chicago", periods=7)\n newidx = date_range("20131103", periods=10, freq="h")\n s = Series(range(7), index=idx)\n msg = (\n r"Cannot compare dtypes datetime64\[ns, America/Chicago\] "\n r"and datetime64\[ns\]"\n )\n with pytest.raises(TypeError, match=msg):\n s.reindex(newidx, method="ffill")\n\n\ndef test_reindex_empty_series_tz_dtype():\n # GH 20869\n result = Series(dtype="datetime64[ns, UTC]").reindex([0, 1])\n expected = Series([NaT] * 2, dtype="datetime64[ns, UTC]")\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "p_values, o_values, values, expected_values",\n [\n (\n [Period("2019Q1", "Q-DEC"), Period("2019Q2", "Q-DEC")],\n [Period("2019Q1", "Q-DEC"), 
Period("2019Q2", "Q-DEC"), "All"],\n [1.0, 1.0],\n [1.0, 1.0, np.nan],\n ),\n (\n [Period("2019Q1", "Q-DEC"), Period("2019Q2", "Q-DEC")],\n [Period("2019Q1", "Q-DEC"), Period("2019Q2", "Q-DEC")],\n [1.0, 1.0],\n [1.0, 1.0],\n ),\n ],\n)\ndef test_reindex_periodindex_with_object(p_values, o_values, values, expected_values):\n # GH#28337\n period_index = PeriodIndex(p_values)\n object_index = Index(o_values)\n\n ser = Series(values, index=period_index)\n result = ser.reindex(object_index)\n expected = Series(expected_values, index=object_index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_reindex_too_many_args():\n # GH 40980\n ser = Series([1, 2])\n msg = r"reindex\(\) takes from 1 to 2 positional arguments but 3 were given"\n with pytest.raises(TypeError, match=msg):\n ser.reindex([2, 3], False)\n\n\ndef test_reindex_double_index():\n # GH 40980\n ser = Series([1, 2])\n msg = r"reindex\(\) got multiple values for argument 'index'"\n with pytest.raises(TypeError, match=msg):\n ser.reindex([2, 3], index=[3, 4])\n\n\ndef test_reindex_no_posargs():\n # GH 40980\n ser = Series([1, 2])\n result = ser.reindex(index=[1, 0])\n expected = Series([2, 1], index=[1, 0])\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("values", [[["a"], ["x"]], [[], []]])\ndef test_reindex_empty_with_level(values):\n # GH41170\n ser = Series(\n range(len(values[0])), index=MultiIndex.from_arrays(values), dtype="object"\n )\n result = ser.reindex(np.array(["b"]), level=0)\n expected = Series(\n index=MultiIndex(levels=[["b"], values[1]], codes=[[], []]), dtype="object"\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_reindex_missing_category():\n # GH#18185\n ser = Series([1, 2, 3, 1], dtype="category")\n msg = r"Cannot setitem on a Categorical with a new category \(-1\)"\n with pytest.raises(TypeError, match=msg):\n ser.reindex([1, 2, 3, 4, 5], fill_value=-1)\n\n\ndef test_reindexing_with_float64_NA_log():\n # GH 47055\n s = Series([1.0, NA], 
dtype=Float64Dtype())\n s_reindex = s.reindex(range(3))\n result = s_reindex.values._data\n expected = np.array([1, np.nan, np.nan])\n tm.assert_numpy_array_equal(result, expected)\n with tm.assert_produces_warning(None):\n result_log = np.log(s_reindex)\n expected_log = Series([0, np.nan, np.nan], dtype=Float64Dtype())\n tm.assert_series_equal(result_log, expected_log)\n\n\n@pytest.mark.parametrize("dtype", ["timedelta64", "datetime64"])\ndef test_reindex_expand_nonnano_nat(dtype):\n # GH 53497\n ser = Series(np.array([1], dtype=f"{dtype}[s]"))\n result = ser.reindex(RangeIndex(2))\n expected = Series(\n np.array([1, getattr(np, dtype)("nat", "s")], dtype=f"{dtype}[s]")\n )\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_reindex.py | test_reindex.py | Python | 14,417 | 0.95 | 0.081264 | 0.126844 | awesome-app | 760 | 2024-02-05T23:30:41.249787 | GPL-3.0 | true | 17b8d1276f0c1da1bc576af670d056df |
from datetime import datetime\n\nimport numpy as np\n\nfrom pandas import Series\nimport pandas._testing as tm\n\n\ndef test_reindex_like(datetime_series):\n other = datetime_series[::2]\n tm.assert_series_equal(\n datetime_series.reindex(other.index), datetime_series.reindex_like(other)\n )\n\n # GH#7179\n day1 = datetime(2013, 3, 5)\n day2 = datetime(2013, 5, 5)\n day3 = datetime(2014, 3, 5)\n\n series1 = Series([5, None, None], [day1, day2, day3])\n series2 = Series([None, None], [day1, day3])\n\n result = series1.reindex_like(series2, method="pad")\n expected = Series([5, np.nan], index=[day1, day3])\n tm.assert_series_equal(result, expected)\n\n\ndef test_reindex_like_nearest():\n ser = Series(np.arange(10, dtype="int64"))\n\n target = [0.1, 0.9, 1.5, 2.0]\n other = ser.reindex(target, method="nearest")\n expected = Series(np.around(target).astype("int64"), target)\n\n result = ser.reindex_like(other, method="nearest")\n tm.assert_series_equal(expected, result)\n\n result = ser.reindex_like(other, method="nearest", tolerance=1)\n tm.assert_series_equal(expected, result)\n result = ser.reindex_like(other, method="nearest", tolerance=[1, 2, 3, 4])\n tm.assert_series_equal(expected, result)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_reindex_like.py | test_reindex_like.py | Python | 1,245 | 0.95 | 0.04878 | 0.034483 | python-kit | 236 | 2024-01-19T18:58:46.691365 | GPL-3.0 | true | 1460939a1a02bf357d1e4a02e916a581 |
from datetime import datetime\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n Index,\n MultiIndex,\n Series,\n array,\n)\nimport pandas._testing as tm\n\n\nclass TestRename:\n def test_rename(self, datetime_series):\n ts = datetime_series\n renamer = lambda x: x.strftime("%Y%m%d")\n renamed = ts.rename(renamer)\n assert renamed.index[0] == renamer(ts.index[0])\n\n # dict\n rename_dict = dict(zip(ts.index, renamed.index))\n renamed2 = ts.rename(rename_dict)\n tm.assert_series_equal(renamed, renamed2)\n\n def test_rename_partial_dict(self):\n # partial dict\n ser = Series(np.arange(4), index=["a", "b", "c", "d"], dtype="int64")\n renamed = ser.rename({"b": "foo", "d": "bar"})\n tm.assert_index_equal(renamed.index, Index(["a", "foo", "c", "bar"]))\n\n def test_rename_retain_index_name(self):\n # index with name\n renamer = Series(\n np.arange(4), index=Index(["a", "b", "c", "d"], name="name"), dtype="int64"\n )\n renamed = renamer.rename({})\n assert renamed.index.name == renamer.index.name\n\n def test_rename_by_series(self):\n ser = Series(range(5), name="foo")\n renamer = Series({1: 10, 2: 20})\n result = ser.rename(renamer)\n expected = Series(range(5), index=[0, 10, 20, 3, 4], name="foo")\n tm.assert_series_equal(result, expected)\n\n def test_rename_set_name(self, using_infer_string):\n ser = Series(range(4), index=list("abcd"))\n for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]:\n result = ser.rename(name)\n assert result.name == name\n if using_infer_string:\n tm.assert_extension_array_equal(result.index.values, ser.index.values)\n else:\n tm.assert_numpy_array_equal(result.index.values, ser.index.values)\n assert ser.name is None\n\n def test_rename_set_name_inplace(self, using_infer_string):\n ser = Series(range(3), index=list("abc"))\n for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]:\n ser.rename(name, inplace=True)\n assert ser.name == name\n exp = np.array(["a", "b", "c"], dtype=np.object_)\n if 
using_infer_string:\n exp = array(exp, dtype="str")\n tm.assert_extension_array_equal(ser.index.values, exp)\n else:\n tm.assert_numpy_array_equal(ser.index.values, exp)\n\n def test_rename_axis_supported(self):\n # Supporting axis for compatibility, detailed in GH-18589\n ser = Series(range(5))\n ser.rename({}, axis=0)\n ser.rename({}, axis="index")\n\n with pytest.raises(ValueError, match="No axis named 5"):\n ser.rename({}, axis=5)\n\n def test_rename_inplace(self, datetime_series):\n renamer = lambda x: x.strftime("%Y%m%d")\n expected = renamer(datetime_series.index[0])\n\n datetime_series.rename(renamer, inplace=True)\n assert datetime_series.index[0] == expected\n\n def test_rename_with_custom_indexer(self):\n # GH 27814\n class MyIndexer:\n pass\n\n ix = MyIndexer()\n ser = Series([1, 2, 3]).rename(ix)\n assert ser.name is ix\n\n def test_rename_with_custom_indexer_inplace(self):\n # GH 27814\n class MyIndexer:\n pass\n\n ix = MyIndexer()\n ser = Series([1, 2, 3])\n ser.rename(ix, inplace=True)\n assert ser.name is ix\n\n def test_rename_callable(self):\n # GH 17407\n ser = Series(range(1, 6), index=Index(range(2, 7), name="IntIndex"))\n result = ser.rename(str)\n expected = ser.rename(lambda i: str(i))\n tm.assert_series_equal(result, expected)\n\n assert result.name == expected.name\n\n def test_rename_none(self):\n # GH 40977\n ser = Series([1, 2], name="foo")\n result = ser.rename(None)\n expected = Series([1, 2])\n tm.assert_series_equal(result, expected)\n\n def test_rename_series_with_multiindex(self):\n # issue #43659\n arrays = [\n ["bar", "baz", "baz", "foo", "qux"],\n ["one", "one", "two", "two", "one"],\n ]\n\n index = MultiIndex.from_arrays(arrays, names=["first", "second"])\n ser = Series(np.ones(5), index=index)\n result = ser.rename(index={"one": "yes"}, level="second", errors="raise")\n\n arrays_expected = [\n ["bar", "baz", "baz", "foo", "qux"],\n ["yes", "yes", "two", "two", "yes"],\n ]\n\n index_expected = MultiIndex.from_arrays(\n 
arrays_expected, names=["first", "second"]\n )\n series_expected = Series(np.ones(5), index=index_expected)\n\n tm.assert_series_equal(result, series_expected)\n\n def test_rename_series_with_multiindex_keeps_ea_dtypes(self):\n # GH21055\n arrays = [\n Index([1, 2, 3], dtype="Int64").astype("category"),\n Index([1, 2, 3], dtype="Int64"),\n ]\n mi = MultiIndex.from_arrays(arrays, names=["A", "B"])\n ser = Series(1, index=mi)\n result = ser.rename({1: 4}, level=1)\n\n arrays_expected = [\n Index([1, 2, 3], dtype="Int64").astype("category"),\n Index([4, 2, 3], dtype="Int64"),\n ]\n mi_expected = MultiIndex.from_arrays(arrays_expected, names=["A", "B"])\n expected = Series(1, index=mi_expected)\n\n tm.assert_series_equal(result, expected)\n\n def test_rename_error_arg(self):\n # GH 46889\n ser = Series(["foo", "bar"])\n match = re.escape("[2] not found in axis")\n with pytest.raises(KeyError, match=match):\n ser.rename({2: 9}, errors="raise")\n\n def test_rename_copy_false(self, using_copy_on_write, warn_copy_on_write):\n # GH 46889\n ser = Series(["foo", "bar"])\n ser_orig = ser.copy()\n shallow_copy = ser.rename({1: 9}, copy=False)\n with tm.assert_cow_warning(warn_copy_on_write):\n ser[0] = "foobar"\n if using_copy_on_write:\n assert ser_orig[0] == shallow_copy[0]\n assert ser_orig[1] == shallow_copy[9]\n else:\n assert ser[0] == shallow_copy[0]\n assert ser[1] == shallow_copy[9]\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_rename.py | test_rename.py | Python | 6,303 | 0.95 | 0.13587 | 0.078431 | vue-tools | 242 | 2023-12-30T17:17:24.858136 | Apache-2.0 | true | d76c4443626c56c8e63eadfd4f4a3f5c |
import pytest\n\nfrom pandas import (\n Index,\n MultiIndex,\n Series,\n)\nimport pandas._testing as tm\n\n\nclass TestSeriesRenameAxis:\n def test_rename_axis_mapper(self):\n # GH 19978\n mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])\n ser = Series(list(range(len(mi))), index=mi)\n\n result = ser.rename_axis(index={"ll": "foo"})\n assert result.index.names == ["foo", "nn"]\n\n result = ser.rename_axis(index=str.upper, axis=0)\n assert result.index.names == ["LL", "NN"]\n\n result = ser.rename_axis(index=["foo", "goo"])\n assert result.index.names == ["foo", "goo"]\n\n with pytest.raises(TypeError, match="unexpected"):\n ser.rename_axis(columns="wrong")\n\n def test_rename_axis_inplace(self, datetime_series):\n # GH 15704\n expected = datetime_series.rename_axis("foo")\n result = datetime_series\n no_return = result.rename_axis("foo", inplace=True)\n\n assert no_return is None\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("kwargs", [{"mapper": None}, {"index": None}, {}])\n def test_rename_axis_none(self, kwargs):\n # GH 25034\n index = Index(list("abc"), name="foo")\n ser = Series([1, 2, 3], index=index)\n\n result = ser.rename_axis(**kwargs)\n expected_index = index.rename(None) if kwargs else index\n expected = Series([1, 2, 3], index=expected_index)\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_rename_axis.py | test_rename_axis.py | Python | 1,520 | 0.95 | 0.106383 | 0.083333 | python-kit | 953 | 2023-11-23T02:58:14.145604 | Apache-2.0 | true | 8291867b4adbf144ed63aef27e636f8a |
import numpy as np\nimport pytest\n\nfrom pandas import (\n MultiIndex,\n Series,\n)\nimport pandas._testing as tm\n\n\nclass TestRepeat:\n def test_repeat(self):\n ser = Series(np.random.default_rng(2).standard_normal(3), index=["a", "b", "c"])\n\n reps = ser.repeat(5)\n exp = Series(ser.values.repeat(5), index=ser.index.values.repeat(5))\n tm.assert_series_equal(reps, exp)\n\n to_rep = [2, 3, 4]\n reps = ser.repeat(to_rep)\n exp = Series(ser.values.repeat(to_rep), index=ser.index.values.repeat(to_rep))\n tm.assert_series_equal(reps, exp)\n\n def test_numpy_repeat(self):\n ser = Series(np.arange(3), name="x")\n expected = Series(\n ser.values.repeat(2), name="x", index=ser.index.values.repeat(2)\n )\n tm.assert_series_equal(np.repeat(ser, 2), expected)\n\n msg = "the 'axis' parameter is not supported"\n with pytest.raises(ValueError, match=msg):\n np.repeat(ser, 2, axis=0)\n\n def test_repeat_with_multiindex(self):\n # GH#9361, fixed by GH#7891\n m_idx = MultiIndex.from_tuples([(1, 2), (3, 4), (5, 6), (7, 8)])\n data = ["a", "b", "c", "d"]\n m_df = Series(data, index=m_idx)\n assert m_df.repeat(3).shape == (3 * len(data),)\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_repeat.py | test_repeat.py | Python | 1,274 | 0.95 | 0.1 | 0.03125 | python-kit | 23 | 2023-09-20T10:04:33.654723 | BSD-3-Clause | true | b8ee00285a5b23a769e135a4daf1b17b |
import re

import numpy as np
import pytest

import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import IntervalArray


class TestSeriesReplace:
    """Tests for Series.replace: scalar/list/dict mappers, regex=True,
    inplace operation, dtype preservation across numpy and extension
    dtypes, and the deprecated ``method`` keyword."""

    def test_replace_explicit_none(self):
        # GH#36984 if the user explicitly passes value=None, give it to them
        ser = pd.Series([0, 0, ""], dtype=object)
        result = ser.replace("", None)
        expected = pd.Series([0, 0, None], dtype=object)
        tm.assert_series_equal(result, expected)

        # Cast column 2 to object to avoid implicit cast when setting entry to ""
        df = pd.DataFrame(np.zeros((3, 3))).astype({2: object})
        df.iloc[2, 2] = ""
        result = df.replace("", None)
        expected = pd.DataFrame(
            {
                0: np.zeros(3),
                1: np.zeros(3),
                2: np.array([0.0, 0.0, None], dtype=object),
            }
        )
        assert expected.iloc[2, 2] is None
        tm.assert_frame_equal(result, expected)

        # GH#19998 same thing with object dtype
        ser = pd.Series([10, 20, 30, "a", "a", "b", "a"])
        result = ser.replace("a", None)
        expected = pd.Series([10, 20, 30, None, None, "b", None])
        assert expected.iloc[-1] is None
        tm.assert_series_equal(result, expected)

    def test_replace_noop_doesnt_downcast(self):
        # GH#44498
        ser = pd.Series([None, None, pd.Timestamp("2021-12-16 17:31")], dtype=object)
        res = ser.replace({np.nan: None}) # should be a no-op
        tm.assert_series_equal(res, ser)
        assert res.dtype == object

        # same thing but different calling convention
        res = ser.replace(np.nan, None)
        tm.assert_series_equal(res, ser)
        assert res.dtype == object

    def test_replace(self):
        N = 50
        ser = pd.Series(np.random.default_rng(2).standard_normal(N))
        ser[0:4] = np.nan
        ser[6:10] = 0

        # replace list with a single value
        return_value = ser.replace([np.nan], -1, inplace=True)
        assert return_value is None

        exp = ser.fillna(-1)
        tm.assert_series_equal(ser, exp)

        rs = ser.replace(0.0, np.nan)
        ser[ser == 0.0] = np.nan
        tm.assert_series_equal(rs, ser)

        ser = pd.Series(
            np.fabs(np.random.default_rng(2).standard_normal(N)),
            pd.date_range("2020-01-01", periods=N),
            dtype=object,
        )
        ser[:5] = np.nan
        ser[6:10] = "foo"
        ser[20:30] = "bar"

        # replace list with a single value
        msg = "Downcasting behavior in `replace`"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            rs = ser.replace([np.nan, "foo", "bar"], -1)

        assert (rs[:5] == -1).all()
        assert (rs[6:10] == -1).all()
        assert (rs[20:30] == -1).all()
        assert (pd.isna(ser[:5])).all()

        # replace with different values
        with tm.assert_produces_warning(FutureWarning, match=msg):
            rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})

        assert (rs[:5] == -1).all()
        assert (rs[6:10] == -2).all()
        assert (rs[20:30] == -3).all()
        assert (pd.isna(ser[:5])).all()

        # replace with different values with 2 lists
        with tm.assert_produces_warning(FutureWarning, match=msg):
            rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
        tm.assert_series_equal(rs, rs2)

        # replace inplace
        with tm.assert_produces_warning(FutureWarning, match=msg):
            return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
        assert return_value is None

        assert (ser[:5] == -1).all()
        assert (ser[6:10] == -1).all()
        assert (ser[20:30] == -1).all()

    def test_replace_nan_with_inf(self):
        ser = pd.Series([np.nan, 0, np.inf])
        tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))

        ser = pd.Series([np.nan, 0, "foo", "bar", np.inf, None, pd.NaT])
        tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
        filled = ser.copy()
        filled[4] = 0
        tm.assert_series_equal(ser.replace(np.inf, 0), filled)

    def test_replace_listlike_value_listlike_target(self, datetime_series):
        ser = pd.Series(datetime_series.index)
        tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))

        # malformed
        msg = r"Replacement lists must match in length\. Expecting 3 got 2"
        with pytest.raises(ValueError, match=msg):
            ser.replace([1, 2, 3], [np.nan, 0])

        # ser is dt64 so can't hold 1 or 2, so this replace is a no-op
        result = ser.replace([1, 2], [np.nan, 0])
        tm.assert_series_equal(result, ser)

        ser = pd.Series([0, 1, 2, 3, 4])
        result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
        tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))

    def test_replace_gh5319(self):
        # API change from 0.12?
        # GH 5319
        ser = pd.Series([0, np.nan, 2, 3, 4])
        expected = ser.ffill()
        msg = (
            "Series.replace without 'value' and with non-dict-like "
            "'to_replace' is deprecated"
        )
        with tm.assert_produces_warning(FutureWarning, match=msg):
            result = ser.replace([np.nan])
        tm.assert_series_equal(result, expected)

        ser = pd.Series([0, np.nan, 2, 3, 4])
        expected = ser.ffill()
        with tm.assert_produces_warning(FutureWarning, match=msg):
            result = ser.replace(np.nan)
        tm.assert_series_equal(result, expected)

    def test_replace_datetime64(self):
        # GH 5797
        ser = pd.Series(pd.date_range("20130101", periods=5))
        expected = ser.copy()
        expected.loc[2] = pd.Timestamp("20120101")
        result = ser.replace({pd.Timestamp("20130103"): pd.Timestamp("20120101")})
        tm.assert_series_equal(result, expected)
        result = ser.replace(pd.Timestamp("20130103"), pd.Timestamp("20120101"))
        tm.assert_series_equal(result, expected)

    def test_replace_nat_with_tz(self):
        # GH 11792: Test with replacing NaT in a list with tz data
        ts = pd.Timestamp("2015/01/01", tz="UTC")
        s = pd.Series([pd.NaT, pd.Timestamp("2015/01/01", tz="UTC")])
        result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)
        expected = pd.Series([pd.Timestamp.min, ts], dtype=object)
        tm.assert_series_equal(expected, result)

    def test_replace_timedelta_td64(self):
        tdi = pd.timedelta_range(0, periods=5)
        ser = pd.Series(tdi)

        # Using a single dict argument means we go through replace_list
        result = ser.replace({ser[1]: ser[3]})

        expected = pd.Series([ser[0], ser[3], ser[2], ser[3], ser[4]])
        tm.assert_series_equal(result, expected)

    def test_replace_with_single_list(self):
        ser = pd.Series([0, 1, 2, 3, 4])
        msg2 = (
            "Series.replace without 'value' and with non-dict-like "
            "'to_replace' is deprecated"
        )
        with tm.assert_produces_warning(FutureWarning, match=msg2):
            result = ser.replace([1, 2, 3])
        tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))

        s = ser.copy()
        with tm.assert_produces_warning(FutureWarning, match=msg2):
            return_value = s.replace([1, 2, 3], inplace=True)
        assert return_value is None
        tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))

        # make sure things don't get corrupted when fillna call fails
        s = ser.copy()
        msg = (
            r"Invalid fill method\. Expecting pad \(ffill\) or backfill "
            r"\(bfill\)\. Got crash_cymbal"
        )
        msg3 = "The 'method' keyword in Series.replace is deprecated"
        with pytest.raises(ValueError, match=msg):
            with tm.assert_produces_warning(FutureWarning, match=msg3):
                return_value = s.replace([1, 2, 3], inplace=True, method="crash_cymbal")
            assert return_value is None
        tm.assert_series_equal(s, ser)

    def test_replace_mixed_types(self):
        ser = pd.Series(np.arange(5), dtype="int64")

        def check_replace(to_rep, val, expected):
            # run both out-of-place and inplace variants against `expected`
            sc = ser.copy()
            result = ser.replace(to_rep, val)
            return_value = sc.replace(to_rep, val, inplace=True)
            assert return_value is None
            tm.assert_series_equal(expected, result)
            tm.assert_series_equal(expected, sc)

        # 3.0 can still be held in our int64 series, so we do not upcast GH#44940
        tr, v = [3], [3.0]
        check_replace(tr, v, ser)
        # Note this matches what we get with the scalars 3 and 3.0
        check_replace(tr[0], v[0], ser)

        # MUST upcast to float
        e = pd.Series([0, 1, 2, 3.5, 4])
        tr, v = [3], [3.5]
        check_replace(tr, v, e)

        # casts to object
        e = pd.Series([0, 1, 2, 3.5, "a"])
        tr, v = [3, 4], [3.5, "a"]
        check_replace(tr, v, e)

        # again casts to object
        e = pd.Series([0, 1, 2, 3.5, pd.Timestamp("20130101")])
        tr, v = [3, 4], [3.5, pd.Timestamp("20130101")]
        check_replace(tr, v, e)

        # casts to object
        e = pd.Series([0, 1, 2, 3.5, True], dtype="object")
        tr, v = [3, 4], [3.5, True]
        check_replace(tr, v, e)

        # test an object with dates + floats + integers + strings
        dr = pd.Series(pd.date_range("1/1/2001", "1/10/2001", freq="D"))
        result = dr.astype(object).replace([dr[0], dr[1], dr[2]], [1.0, 2, "a"])
        expected = pd.Series([1.0, 2, "a"] + dr[3:].tolist(), dtype=object)
        tm.assert_series_equal(result, expected)

    def test_replace_bool_with_string_no_op(self):
        s = pd.Series([True, False, True])
        result = s.replace("fun", "in-the-sun")
        tm.assert_series_equal(s, result)

    def test_replace_bool_with_string(self):
        # nonexistent elements
        s = pd.Series([True, False, True])
        result = s.replace(True, "2u")
        expected = pd.Series(["2u", False, "2u"])
        tm.assert_series_equal(expected, result)

    def test_replace_bool_with_bool(self):
        s = pd.Series([True, False, True])
        result = s.replace(True, False)
        expected = pd.Series([False] * len(s))
        tm.assert_series_equal(expected, result)

    def test_replace_with_dict_with_bool_keys(self):
        s = pd.Series([True, False, True])
        result = s.replace({"asdf": "asdb", True: "yes"})
        expected = pd.Series(["yes", False, "yes"])
        tm.assert_series_equal(result, expected)

    def test_replace_Int_with_na(self, any_int_ea_dtype):
        # GH 38267
        result = pd.Series([0, None], dtype=any_int_ea_dtype).replace(0, pd.NA)
        expected = pd.Series([pd.NA, pd.NA], dtype=any_int_ea_dtype)
        tm.assert_series_equal(result, expected)
        result = pd.Series([0, 1], dtype=any_int_ea_dtype).replace(0, pd.NA)
        result.replace(1, pd.NA, inplace=True)
        tm.assert_series_equal(result, expected)

    def test_replace2(self):
        # NOTE(review): largely duplicates the object-dtype half of
        # test_replace above.
        N = 50
        ser = pd.Series(
            np.fabs(np.random.default_rng(2).standard_normal(N)),
            pd.date_range("2020-01-01", periods=N),
            dtype=object,
        )
        ser[:5] = np.nan
        ser[6:10] = "foo"
        ser[20:30] = "bar"

        # replace list with a single value
        msg = "Downcasting behavior in `replace`"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            rs = ser.replace([np.nan, "foo", "bar"], -1)

        assert (rs[:5] == -1).all()
        assert (rs[6:10] == -1).all()
        assert (rs[20:30] == -1).all()
        assert (pd.isna(ser[:5])).all()

        # replace with different values
        with tm.assert_produces_warning(FutureWarning, match=msg):
            rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})

        assert (rs[:5] == -1).all()
        assert (rs[6:10] == -2).all()
        assert (rs[20:30] == -3).all()
        assert (pd.isna(ser[:5])).all()

        # replace with different values with 2 lists
        with tm.assert_produces_warning(FutureWarning, match=msg):
            rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
        tm.assert_series_equal(rs, rs2)

        # replace inplace
        with tm.assert_produces_warning(FutureWarning, match=msg):
            return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
        assert return_value is None
        assert (ser[:5] == -1).all()
        assert (ser[6:10] == -1).all()
        assert (ser[20:30] == -1).all()

    @pytest.mark.parametrize("inplace", [True, False])
    def test_replace_cascade(self, inplace):
        # Test that replaced values are not replaced again
        # GH #50778
        ser = pd.Series([1, 2, 3])
        expected = pd.Series([2, 3, 4])

        res = ser.replace([1, 2, 3], [2, 3, 4], inplace=inplace)
        if inplace:
            tm.assert_series_equal(ser, expected)
        else:
            tm.assert_series_equal(res, expected)

    def test_replace_with_dictlike_and_string_dtype(self, nullable_string_dtype):
        # GH 32621, GH#44940
        ser = pd.Series(["one", "two", np.nan], dtype=nullable_string_dtype)
        expected = pd.Series(["1", "2", np.nan], dtype=nullable_string_dtype)
        result = ser.replace({"one": "1", "two": "2"})
        tm.assert_series_equal(expected, result)

    def test_replace_with_empty_dictlike(self):
        # GH 15289
        s = pd.Series(list("abcd"))
        tm.assert_series_equal(s, s.replace({}))

        empty_series = pd.Series([])
        tm.assert_series_equal(s, s.replace(empty_series))

    def test_replace_string_with_number(self):
        # GH 15743
        s = pd.Series([1, 2, 3])
        result = s.replace("2", np.nan)
        expected = pd.Series([1, 2, 3])
        tm.assert_series_equal(expected, result)

    def test_replace_replacer_equals_replacement(self):
        # GH 20656
        # make sure all replacers are matching against original values
        s = pd.Series(["a", "b"])
        expected = pd.Series(["b", "a"])
        result = s.replace({"a": "b", "b": "a"})
        tm.assert_series_equal(expected, result)

    def test_replace_unicode_with_number(self):
        # GH 15743
        # NOTE(review): identical body to test_replace_string_with_number.
        s = pd.Series([1, 2, 3])
        result = s.replace("2", np.nan)
        expected = pd.Series([1, 2, 3])
        tm.assert_series_equal(expected, result)

    def test_replace_mixed_types_with_string(self):
        # Testing mixed
        s = pd.Series([1, 2, 3, "4", 4, 5])
        msg = "Downcasting behavior in `replace`"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            result = s.replace([2, "4"], np.nan)
        expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])
        tm.assert_series_equal(expected, result)

    @pytest.mark.parametrize(
        "categorical, numeric",
        [
            (pd.Categorical(["A"], categories=["A", "B"]), [1]),
            (pd.Categorical(["A", "B"], categories=["A", "B"]), [1, 2]),
        ],
    )
    def test_replace_categorical(self, categorical, numeric, using_infer_string):
        # GH 24971, GH#23305
        ser = pd.Series(categorical)
        # NOTE(review): the next assignment is dead code -- `msg` is
        # immediately overwritten on the following line.
        msg = "Downcasting behavior in `replace`"
        msg = "with CategoricalDtype is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            result = ser.replace({"A": 1, "B": 2})
        expected = pd.Series(numeric).astype("category")
        if 2 not in expected.cat.categories:
            # i.e. categories should be [1, 2] even if there are no "B"s present
            # GH#44940
            expected = expected.cat.add_categories(2)
        tm.assert_series_equal(expected, result)

    @pytest.mark.parametrize(
        "data, data_exp", [(["a", "b", "c"], ["b", "b", "c"]), (["a"], ["b"])]
    )
    def test_replace_categorical_inplace(self, data, data_exp):
        # GH 53358
        result = pd.Series(data, dtype="category")
        msg = "with CategoricalDtype is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            result.replace(to_replace="a", value="b", inplace=True)
        expected = pd.Series(data_exp, dtype="category")
        tm.assert_series_equal(result, expected)

    def test_replace_categorical_single(self):
        # GH 26988
        dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
        s = pd.Series(dti)
        c = s.astype("category")

        expected = c.copy()
        expected = expected.cat.add_categories("foo")
        expected[2] = "foo"
        expected = expected.cat.remove_unused_categories()
        assert c[2] != "foo"

        msg = "with CategoricalDtype is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            result = c.replace(c[2], "foo")
        tm.assert_series_equal(expected, result)
        assert c[2] != "foo" # ensure non-inplace call does not alter original

        msg = "with CategoricalDtype is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            return_value = c.replace(c[2], "foo", inplace=True)
        assert return_value is None
        tm.assert_series_equal(expected, c)

        first_value = c[0]
        msg = "with CategoricalDtype is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            return_value = c.replace(c[1], c[0], inplace=True)
        assert return_value is None
        assert c[0] == c[1] == first_value # test replacing with existing value

    def test_replace_with_no_overflowerror(self):
        # GH 25616
        # casts to object without Exception from OverflowError
        s = pd.Series([0, 1, 2, 3, 4])
        result = s.replace([3], ["100000000000000000000"])
        expected = pd.Series([0, 1, 2, "100000000000000000000", 4])
        tm.assert_series_equal(result, expected)

        s = pd.Series([0, "100000000000000000000", "100000000000000000001"])
        result = s.replace(["100000000000000000000"], [1])
        expected = pd.Series([0, 1, "100000000000000000001"])
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "ser, to_replace, exp",
        [
            ([1, 2, 3], {1: 2, 2: 3, 3: 4}, [2, 3, 4]),
            (["1", "2", "3"], {"1": "2", "2": "3", "3": "4"}, ["2", "3", "4"]),
        ],
    )
    def test_replace_commutative(self, ser, to_replace, exp):
        # GH 16051
        # DataFrame.replace() overwrites when values are non-numeric

        series = pd.Series(ser)

        expected = pd.Series(exp)
        result = series.replace(to_replace)

        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "ser, exp", [([1, 2, 3], [1, True, 3]), (["x", 2, 3], ["x", True, 3])]
    )
    def test_replace_no_cast(self, ser, exp):
        # GH 9113
        # BUG: replace int64 dtype with bool coerces to int64

        series = pd.Series(ser)
        result = series.replace(2, True)
        expected = pd.Series(exp)

        tm.assert_series_equal(result, expected)

    def test_replace_invalid_to_replace(self):
        # GH 18634
        # API: replace() should raise an exception if invalid argument is given
        series = pd.Series(["a", "b", "c "])
        msg = (
            r"Expecting 'to_replace' to be either a scalar, array-like, "
            r"dict or None, got invalid type.*"
        )
        msg2 = (
            "Series.replace without 'value' and with non-dict-like "
            "'to_replace' is deprecated"
        )
        with pytest.raises(TypeError, match=msg):
            with tm.assert_produces_warning(FutureWarning, match=msg2):
                series.replace(lambda x: x.strip())

    @pytest.mark.parametrize("frame", [False, True])
    def test_replace_nonbool_regex(self, frame):
        obj = pd.Series(["a", "b", "c "])
        if frame:
            obj = obj.to_frame()

        msg = "'to_replace' must be 'None' if 'regex' is not a bool"
        with pytest.raises(ValueError, match=msg):
            obj.replace(to_replace=["a"], regex="foo")

    @pytest.mark.parametrize("frame", [False, True])
    def test_replace_empty_copy(self, frame):
        obj = pd.Series([], dtype=np.float64)
        if frame:
            obj = obj.to_frame()

        res = obj.replace(4, 5, inplace=True)
        assert res is None

        res = obj.replace(4, 5, inplace=False)
        tm.assert_equal(res, obj)
        assert res is not obj

    def test_replace_only_one_dictlike_arg(self, fixed_now_ts):
        # GH#33340

        ser = pd.Series([1, 2, "A", fixed_now_ts, True])
        to_replace = {0: 1, 2: "A"}
        value = "foo"
        msg = "Series.replace cannot use dict-like to_replace and non-None value"
        with pytest.raises(ValueError, match=msg):
            ser.replace(to_replace, value)

        to_replace = 1
        value = {0: "foo", 2: "bar"}
        msg = "Series.replace cannot use dict-value and non-None to_replace"
        with pytest.raises(ValueError, match=msg):
            ser.replace(to_replace, value)

    def test_replace_extension_other(self, frame_or_series):
        # https://github.com/pandas-dev/pandas/issues/34530
        obj = frame_or_series(pd.array([1, 2, 3], dtype="Int64"))
        result = obj.replace("", "") # no exception
        # should not have changed dtype
        tm.assert_equal(obj, result)

    def _check_replace_with_method(self, ser: pd.Series):
        # Shared helper: exercises replace(..., method="pad") on both the
        # Series and its one-column DataFrame, out-of-place and inplace,
        # asserting the FutureWarning for the deprecated `method` keyword.
        df = ser.to_frame()

        msg1 = "The 'method' keyword in Series.replace is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg1):
            res = ser.replace(ser[1], method="pad")
        expected = pd.Series([ser[0], ser[0]] + list(ser[2:]), dtype=ser.dtype)
        tm.assert_series_equal(res, expected)

        msg2 = "The 'method' keyword in DataFrame.replace is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg2):
            res_df = df.replace(ser[1], method="pad")
        tm.assert_frame_equal(res_df, expected.to_frame())

        ser2 = ser.copy()
        with tm.assert_produces_warning(FutureWarning, match=msg1):
            res2 = ser2.replace(ser[1], method="pad", inplace=True)
        assert res2 is None
        tm.assert_series_equal(ser2, expected)

        with tm.assert_produces_warning(FutureWarning, match=msg2):
            res_df2 = df.replace(ser[1], method="pad", inplace=True)
        assert res_df2 is None
        tm.assert_frame_equal(df, expected.to_frame())

    def test_replace_ea_dtype_with_method(self, any_numeric_ea_dtype):
        arr = pd.array([1, 2, pd.NA, 4], dtype=any_numeric_ea_dtype)
        ser = pd.Series(arr)

        self._check_replace_with_method(ser)

    @pytest.mark.parametrize("as_categorical", [True, False])
    def test_replace_interval_with_method(self, as_categorical):
        # in particular interval that can't hold NA

        idx = pd.IntervalIndex.from_breaks(range(4))
        ser = pd.Series(idx)
        if as_categorical:
            ser = ser.astype("category")

        self._check_replace_with_method(ser)

    @pytest.mark.parametrize("as_period", [True, False])
    @pytest.mark.parametrize("as_categorical", [True, False])
    def test_replace_datetimelike_with_method(self, as_period, as_categorical):
        idx = pd.date_range("2016-01-01", periods=5, tz="US/Pacific")
        if as_period:
            idx = idx.tz_localize(None).to_period("D")

        ser = pd.Series(idx)
        ser.iloc[-2] = pd.NaT
        if as_categorical:
            ser = ser.astype("category")

        self._check_replace_with_method(ser)

    def test_replace_with_compiled_regex(self):
        # https://github.com/pandas-dev/pandas/issues/35680
        s = pd.Series(["a", "b", "c"])
        regex = re.compile("^a$")
        result = s.replace({regex: "z"}, regex=True)
        expected = pd.Series(["z", "b", "c"])
        tm.assert_series_equal(result, expected)

    def test_pandas_replace_na(self):
        # GH#43344
        ser = pd.Series(["AA", "BB", "CC", "DD", "EE", "", pd.NA], dtype="string")
        regex_mapping = {
            "AA": "CC",
            "BB": "CC",
            "EE": "CC",
            "CC": "CC-REPL",
        }
        result = ser.replace(regex_mapping, regex=True)
        exp = pd.Series(["CC", "CC", "CC-REPL", "DD", "CC", "", pd.NA], dtype="string")
        tm.assert_series_equal(result, exp)

    @pytest.mark.parametrize(
        "dtype, input_data, to_replace, expected_data",
        [
            ("bool", [True, False], {True: False}, [False, False]),
            ("int64", [1, 2], {1: 10, 2: 20}, [10, 20]),
            ("Int64", [1, 2], {1: 10, 2: 20}, [10, 20]),
            ("float64", [1.1, 2.2], {1.1: 10.1, 2.2: 20.5}, [10.1, 20.5]),
            ("Float64", [1.1, 2.2], {1.1: 10.1, 2.2: 20.5}, [10.1, 20.5]),
            ("string", ["one", "two"], {"one": "1", "two": "2"}, ["1", "2"]),
            (
                pd.IntervalDtype("int64"),
                IntervalArray([pd.Interval(1, 2), pd.Interval(2, 3)]),
                {pd.Interval(1, 2): pd.Interval(10, 20)},
                IntervalArray([pd.Interval(10, 20), pd.Interval(2, 3)]),
            ),
            (
                pd.IntervalDtype("float64"),
                IntervalArray([pd.Interval(1.0, 2.7), pd.Interval(2.8, 3.1)]),
                {pd.Interval(1.0, 2.7): pd.Interval(10.6, 20.8)},
                IntervalArray([pd.Interval(10.6, 20.8), pd.Interval(2.8, 3.1)]),
            ),
            (
                pd.PeriodDtype("M"),
                [pd.Period("2020-05", freq="M")],
                {pd.Period("2020-05", freq="M"): pd.Period("2020-06", freq="M")},
                [pd.Period("2020-06", freq="M")],
            ),
        ],
    )
    def test_replace_dtype(self, dtype, input_data, to_replace, expected_data):
        # GH#33484
        ser = pd.Series(input_data, dtype=dtype)
        result = ser.replace(to_replace)
        expected = pd.Series(expected_data, dtype=dtype)
        tm.assert_series_equal(result, expected)

    def test_replace_string_dtype(self):
        # GH#40732, GH#44940
        ser = pd.Series(["one", "two", np.nan], dtype="string")
        res = ser.replace({"one": "1", "two": "2"})
        expected = pd.Series(["1", "2", np.nan], dtype="string")
        tm.assert_series_equal(res, expected)

        # GH#31644
        ser2 = pd.Series(["A", np.nan], dtype="string")
        res2 = ser2.replace("A", "B")
        expected2 = pd.Series(["B", np.nan], dtype="string")
        tm.assert_series_equal(res2, expected2)

        ser3 = pd.Series(["A", "B"], dtype="string")
        res3 = ser3.replace("A", pd.NA)
        expected3 = pd.Series([pd.NA, "B"], dtype="string")
        tm.assert_series_equal(res3, expected3)

    def test_replace_string_dtype_list_to_replace(self):
        # GH#41215, GH#44940
        ser = pd.Series(["abc", "def"], dtype="string")
        res = ser.replace(["abc", "any other string"], "xyz")
        expected = pd.Series(["xyz", "def"], dtype="string")
        tm.assert_series_equal(res, expected)

    def test_replace_string_dtype_regex(self):
        # GH#31644
        ser = pd.Series(["A", "B"], dtype="string")
        res = ser.replace(r".", "C", regex=True)
        expected = pd.Series(["C", "C"], dtype="string")
        tm.assert_series_equal(res, expected)

    def test_replace_nullable_numeric(self):
        # GH#40732, GH#44940

        floats = pd.Series([1.0, 2.0, 3.999, 4.4], dtype=pd.Float64Dtype())
        assert floats.replace({1.0: 9}).dtype == floats.dtype
        assert floats.replace(1.0, 9).dtype == floats.dtype
        assert floats.replace({1.0: 9.0}).dtype == floats.dtype
        assert floats.replace(1.0, 9.0).dtype == floats.dtype

        res = floats.replace(to_replace=[1.0, 2.0], value=[9.0, 10.0])
        assert res.dtype == floats.dtype

        ints = pd.Series([1, 2, 3, 4], dtype=pd.Int64Dtype())
        assert ints.replace({1: 9}).dtype == ints.dtype
        assert ints.replace(1, 9).dtype == ints.dtype
        assert ints.replace({1: 9.0}).dtype == ints.dtype
        assert ints.replace(1, 9.0).dtype == ints.dtype

        # nullable (for now) raises instead of casting
        with pytest.raises(TypeError, match="Invalid value"):
            ints.replace({1: 9.5})
        with pytest.raises(TypeError, match="Invalid value"):
            ints.replace(1, 9.5)

    @pytest.mark.parametrize("regex", [False, True])
    def test_replace_regex_dtype_series(self, regex):
        # GH-48644
        series = pd.Series(["0"], dtype=object)
        expected = pd.Series([1])
        msg = "Downcasting behavior in `replace`"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            result = series.replace(to_replace="0", value=1, regex=regex)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("regex", [False, True])
    def test_replace_regex_dtype_series_string(self, regex):
        series = pd.Series(["0"], dtype="str")
        expected = pd.Series([1], dtype="int64")
        msg = "Downcasting behavior in `replace`"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            result = series.replace(to_replace="0", value=1, regex=regex)
        tm.assert_series_equal(result, expected)

    def test_replace_different_int_types(self, any_int_numpy_dtype):
        # GH#45311
        labs = pd.Series([1, 1, 1, 0, 0, 2, 2, 2], dtype=any_int_numpy_dtype)

        maps = pd.Series([0, 2, 1], dtype=any_int_numpy_dtype)
        map_dict = dict(zip(maps.values, maps.index))

        result = labs.replace(map_dict)
        expected = labs.replace({0: 0, 2: 1, 1: 2})
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("val", [2, np.nan, 2.0])
    def test_replace_value_none_dtype_numeric(self, val):
        # GH#48231
        ser = pd.Series([1, val])
        result = ser.replace(val, None)
        expected = pd.Series([1, None], dtype=object)
        tm.assert_series_equal(result, expected)

    def test_replace_change_dtype_series(self):
        # GH#25797
        df = pd.DataFrame({"Test": ["0.5", True, "0.6"]}, dtype=object)
        df["Test"] = df["Test"].replace([True], [np.nan])
        expected = pd.DataFrame({"Test": ["0.5", np.nan, "0.6"]}, dtype=object)
        tm.assert_frame_equal(df, expected)

        df = pd.DataFrame({"Test": ["0.5", None, "0.6"]}, dtype=object)
        df["Test"] = df["Test"].replace([None], [np.nan])
        tm.assert_frame_equal(df, expected)

        df = pd.DataFrame({"Test": ["0.5", None, "0.6"]}, dtype=object)
        df["Test"] = df["Test"].fillna(np.nan)
        tm.assert_frame_equal(df, expected)

    @pytest.mark.parametrize("dtype", ["object", "Int64"])
    def test_replace_na_in_obj_column(self, dtype):
        # GH#47480
        ser = pd.Series([0, 1, pd.NA], dtype=dtype)
        expected = pd.Series([0, 2, pd.NA], dtype=dtype)
        result = ser.replace(to_replace=1, value=2)
        tm.assert_series_equal(result, expected)

        ser.replace(to_replace=1, value=2, inplace=True)
        tm.assert_series_equal(ser, expected)

    @pytest.mark.parametrize("val", [0, 0.5])
    def test_replace_numeric_column_with_na(self, val):
        # GH#50758
        ser = pd.Series([val, 1])
        expected = pd.Series([val, pd.NA])
        result = ser.replace(to_replace=1, value=pd.NA)
        tm.assert_series_equal(result, expected)

        ser.replace(to_replace=1, value=pd.NA, inplace=True)
        tm.assert_series_equal(ser, expected)

    def test_replace_ea_float_with_bool(self):
        # GH#55398
        ser = pd.Series([0.0], dtype="Float64")
        expected = ser.copy()
        result = ser.replace(False, 1.0)
        tm.assert_series_equal(result, expected)

        ser = pd.Series([False], dtype="boolean")
        expected = ser.copy()
        result = ser.replace(0.0, True)
        tm.assert_series_equal(result, expected)
from datetime import datetime\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n RangeIndex,\n Series,\n date_range,\n option_context,\n)\nimport pandas._testing as tm\n\n\nclass TestResetIndex:\n def test_reset_index_dti_round_trip(self):\n dti = date_range(start="1/1/2001", end="6/1/2001", freq="D")._with_freq(None)\n d1 = DataFrame({"v": np.random.default_rng(2).random(len(dti))}, index=dti)\n d2 = d1.reset_index()\n assert d2.dtypes.iloc[0] == np.dtype("M8[ns]")\n d3 = d2.set_index("index")\n tm.assert_frame_equal(d1, d3, check_names=False)\n\n # GH#2329\n stamp = datetime(2012, 11, 22)\n df = DataFrame([[stamp, 12.1]], columns=["Date", "Value"])\n df = df.set_index("Date")\n\n assert df.index[0] == stamp\n assert df.reset_index()["Date"].iloc[0] == stamp\n\n def test_reset_index(self):\n df = DataFrame(\n 1.1 * np.arange(120).reshape((30, 4)),\n columns=Index(list("ABCD"), dtype=object),\n index=Index([f"i-{i}" for i in range(30)], dtype=object),\n )[:5]\n ser = df.stack(future_stack=True)\n ser.index.names = ["hash", "category"]\n\n ser.name = "value"\n df = ser.reset_index()\n assert "value" in df\n\n df = ser.reset_index(name="value2")\n assert "value2" in df\n\n # check inplace\n s = ser.reset_index(drop=True)\n s2 = ser\n return_value = s2.reset_index(drop=True, inplace=True)\n assert return_value is None\n tm.assert_series_equal(s, s2)\n\n # level\n index = MultiIndex(\n levels=[["bar"], ["one", "two", "three"], [0, 1]],\n codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],\n )\n s = Series(np.random.default_rng(2).standard_normal(6), index=index)\n rs = s.reset_index(level=1)\n assert len(rs.columns) == 2\n\n rs = s.reset_index(level=[0, 2], drop=True)\n tm.assert_index_equal(rs.index, Index(index.get_level_values(1)))\n assert isinstance(rs, Series)\n\n def test_reset_index_name(self):\n s = Series([1, 2, 3], index=Index(range(3), name="x"))\n assert 
s.reset_index().index.name is None\n assert s.reset_index(drop=True).index.name is None\n\n def test_reset_index_level(self):\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])\n\n for levels in ["A", "B"], [0, 1]:\n # With MultiIndex\n s = df.set_index(["A", "B"])["C"]\n\n result = s.reset_index(level=levels[0])\n tm.assert_frame_equal(result, df.set_index("B"))\n\n result = s.reset_index(level=levels[:1])\n tm.assert_frame_equal(result, df.set_index("B"))\n\n result = s.reset_index(level=levels)\n tm.assert_frame_equal(result, df)\n\n result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True)\n tm.assert_frame_equal(result, df[["C"]])\n\n with pytest.raises(KeyError, match="Level E "):\n s.reset_index(level=["A", "E"])\n\n # With single-level Index\n s = df.set_index("A")["B"]\n\n result = s.reset_index(level=levels[0])\n tm.assert_frame_equal(result, df[["A", "B"]])\n\n result = s.reset_index(level=levels[:1])\n tm.assert_frame_equal(result, df[["A", "B"]])\n\n result = s.reset_index(level=levels[0], drop=True)\n tm.assert_series_equal(result, df["B"])\n\n with pytest.raises(IndexError, match="Too many levels"):\n s.reset_index(level=[0, 1, 2])\n\n # Check that .reset_index([],drop=True) doesn't fail\n result = Series(range(4)).reset_index([], drop=True)\n expected = Series(range(4))\n tm.assert_series_equal(result, expected)\n\n def test_reset_index_range(self):\n # GH 12071\n s = Series(range(2), name="A", dtype="int64")\n series_result = s.reset_index()\n assert isinstance(series_result.index, RangeIndex)\n series_expected = DataFrame(\n [[0, 0], [1, 1]], columns=["index", "A"], index=RangeIndex(stop=2)\n )\n tm.assert_frame_equal(series_result, series_expected)\n\n def test_reset_index_drop_errors(self):\n # GH 20925\n\n # KeyError raised for series index when passed level name is missing\n s = Series(range(4))\n with pytest.raises(KeyError, match="does not match index name"):\n s.reset_index("wrong", drop=True)\n with 
pytest.raises(KeyError, match="does not match index name"):\n s.reset_index("wrong")\n\n # KeyError raised for series when level to be dropped is missing\n s = Series(range(4), index=MultiIndex.from_product([[1, 2]] * 2))\n with pytest.raises(KeyError, match="not found"):\n s.reset_index("wrong", drop=True)\n\n def test_reset_index_with_drop(self):\n arrays = [\n ["bar", "bar", "baz", "baz", "qux", "qux", "foo", "foo"],\n ["one", "two", "one", "two", "one", "two", "one", "two"],\n ]\n tuples = zip(*arrays)\n index = MultiIndex.from_tuples(tuples)\n data = np.random.default_rng(2).standard_normal(8)\n ser = Series(data, index=index)\n ser.iloc[3] = np.nan\n\n deleveled = ser.reset_index()\n assert isinstance(deleveled, DataFrame)\n assert len(deleveled.columns) == len(ser.index.levels) + 1\n assert deleveled.index.name == ser.index.name\n\n deleveled = ser.reset_index(drop=True)\n assert isinstance(deleveled, Series)\n assert deleveled.index.name == ser.index.name\n\n def test_reset_index_inplace_and_drop_ignore_name(self):\n # GH#44575\n ser = Series(range(2), name="old")\n ser.reset_index(name="new", drop=True, inplace=True)\n expected = Series(range(2), name="old")\n tm.assert_series_equal(ser, expected)\n\n def test_reset_index_drop_infer_string(self):\n # GH#56160\n pytest.importorskip("pyarrow")\n ser = Series(["a", "b", "c"], dtype=object)\n with option_context("future.infer_string", True):\n result = ser.reset_index(drop=True)\n tm.assert_series_equal(result, ser)\n\n\n@pytest.mark.parametrize(\n "array, dtype",\n [\n (["a", "b"], object),\n (\n pd.period_range("12-1-2000", periods=2, freq="Q-DEC"),\n pd.PeriodDtype(freq="Q-DEC"),\n ),\n ],\n)\ndef test_reset_index_dtypes_on_empty_series_with_multiindex(\n array, dtype, using_infer_string\n):\n # GH 19602 - Preserve dtype on empty Series with MultiIndex\n idx = MultiIndex.from_product([[0, 1], [0.5, 1.0], array])\n result = Series(dtype=object, index=idx)[:0].reset_index().dtypes\n exp = "str" if 
using_infer_string else object\n expected = Series(\n {\n "level_0": np.int64,\n "level_1": np.float64,\n "level_2": exp if dtype == object else dtype,\n 0: object,\n }\n )\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "names, expected_names",\n [\n (["A", "A"], ["A", "A"]),\n (["level_1", None], ["level_1", "level_1"]),\n ],\n)\n@pytest.mark.parametrize("allow_duplicates", [False, True])\ndef test_column_name_duplicates(names, expected_names, allow_duplicates):\n # GH#44755 reset_index with duplicate column labels\n s = Series([1], index=MultiIndex.from_arrays([[1], [1]], names=names))\n if allow_duplicates:\n result = s.reset_index(allow_duplicates=True)\n expected = DataFrame([[1, 1, 1]], columns=expected_names + [0])\n tm.assert_frame_equal(result, expected)\n else:\n with pytest.raises(ValueError, match="cannot insert"):\n s.reset_index()\n | .venv\Lib\site-packages\pandas\tests\series\methods\test_reset_index.py | test_reset_index.py | Python | 7,845 | 0.95 | 0.084444 | 0.075269 | react-lib | 611 | 2025-04-04T09:46:31.741697 | GPL-3.0 | true | b9c67d58eaeadbee30b8f921e4566571 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.